Merge from trunk

Sengian 2010-11-21 11:35:47 +01:00
commit 04aec11f94
13 changed files with 590 additions and 180 deletions

View File

@@ -7,14 +7,22 @@ nytimes.com
'''
import re, string, time
from calibre import entity_to_unicode, strftime
from datetime import timedelta, date
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, BeautifulStoneSoup
class NYTimes(BasicNewsRecipe):
# set headlinesOnly to True for the headlines-only version
# set headlinesOnly to True for the headlines-only version. If True, webEdition is ignored.
headlinesOnly = True
# set webEdition to True for the Web edition of the newspaper. Set oldest_article to the
# number of days old an article can be for inclusion. If oldest_article = 0 all articles
# will be included. Note: oldest_article is ignored if webEdition = False
webEdition = False
oldest_article = 7
# includeSections: List of sections to include. If empty, all sections found will be included.
# Otherwise, only the sections named will be included. For example,
#
@@ -39,20 +47,76 @@ class NYTimes(BasicNewsRecipe):
# from an article (if one exists). If one_picture_per_article = True, the image
# will be moved to a location between the headline and the byline.
# If one_picture_per_article = False, all images from the article will be included
# and shown in their original location.
one_picture_per_article = True
one_picture_per_article = False
# The maximum number of articles that will be downloaded
max_articles_per_feed = 100
# Whether to omit duplicates of articles (typically arising when articles are indexed in
# more than one section). If True, only the first occurrence will be downloaded.
filterDuplicates = True
# Sections to collect for the Web edition.
# Delete any you don't want, or use includeSections or excludeSections
web_sections = [(u'World',u'world'),
(u'U.S.',u'national'),
(u'Politics',u'politics'),
(u'New York',u'nyregion'),
(u'Business','business'),
(u'Technology',u'technology'),
(u'Sports',u'sports'),
(u'Science',u'science'),
(u'Health',u'health'),
(u'Opinion',u'opinion'),
(u'Arts',u'arts'),
(u'Books',u'books'),
(u'Movies',u'movies'),
(u'Music',u'arts/music'),
(u'Television',u'arts/television'),
(u'Style',u'style'),
(u'Dining & Wine',u'dining'),
(u'Fashion & Style',u'fashion'),
(u'Home & Garden',u'garden'),
(u'Travel',u'travel'),
('Education',u'education'),
('Multimedia',u'multimedia'),
(u'Obituaries',u'obituaries'),
(u'Sunday Magazine',u'magazine'),
(u'Week in Review',u'weekinreview')]
if headlinesOnly:
title='New York Times Headlines'
description = 'Headlines from the New York Times'
needs_subscription = False
elif webEdition:
title='New York Times (Web)'
description = 'New York Times on the Web'
needs_subscription = True
else:
title='New York Times'
description = 'Today\'s New York Times'
needs_subscription = True
month_list = ['january','february','march','april','may','june','july','august','september','october','november','december']
def decode_us_date(self,datestr):
udate = datestr.strip().lower().split()
try:
m = self.month_list.index(udate[0])+1
except:
return date.today()
d = int(udate[1])
y = int(udate[2])
try:
d = date(y,m,d)
except:
d = date.today()
return d
earliest_date = date.today() - timedelta(days=oldest_article)
__author__ = 'GRiker/Kovid Goyal/Nick Redding'
language = 'en'
@@ -136,6 +200,12 @@ class NYTimes(BasicNewsRecipe):
.image {text-align: center;}
.source {text-align: left; }'''
articles = {}
key = None
ans = []
url_list = []
def filter_ans(self, ans) :
total_article_count = 0
idx = 0
@@ -164,6 +234,29 @@ class NYTimes(BasicNewsRecipe):
self.log( "Queued %d articles" % total_article_count )
return ans
def exclude_url(self,url):
if not url.startswith("http"):
return True
if not url.endswith(".html"):
return True
if 'nytimes.com' not in url:
return True
if 'podcast' in url:
return True
if '/video/' in url:
return True
if '/slideshow/' in url:
return True
if '/magazine/index' in url:
return True
if '/interactive/' in url:
return True
if '/reference/' in url:
return True
if '/premium/' in url:
return True
return False
def fixChars(self,string):
# Replace lsquo (\x91)
fixed = re.sub("\x91","",string)
@@ -249,7 +342,6 @@ class NYTimes(BasicNewsRecipe):
return BeautifulSoup(_raw, markupMassage=massage)
# Entry point
print "index_to_soup()"
soup = get_the_soup( self.encoding, url_or_raw )
contentType = soup.find(True,attrs={'http-equiv':'Content-Type'})
docEncoding = str(contentType)[str(contentType).find('charset=') + len('charset='):str(contentType).rfind('"')]
@@ -273,33 +365,24 @@ class NYTimes(BasicNewsRecipe):
else:
return description
def parse_todays_index(self):
def feed_title(div):
def feed_title(self,div):
return ''.join(div.findAll(text=True, recursive=True)).strip()
articles = {}
key = None
ans = []
url_list = []
def handle_article(div):
def handle_article(self,div):
thumbnail = div.find('div','thumbnail')
if thumbnail:
thumbnail.extract()
a = div.find('a', href=True)
if not a:
return
url = re.sub(r'\?.*', '', a['href'])
if not url.startswith("http"):
return
if not url.endswith(".html"):
return
if 'podcast' in url:
return
if '/video/' in url:
if self.exclude_url(url):
return
url += '?pagewanted=all'
if url in url_list:
if self.filterDuplicates:
if url in self.url_list:
return
url_list.append(url)
self.url_list.append(url)
title = self.tag_to_string(a, use_alt=True).strip()
description = ''
pubdate = strftime('%a, %d %b')
@@ -314,42 +397,78 @@ class NYTimes(BasicNewsRecipe):
authorAttribution = div.find(True, attrs={'class':'byline'})
if authorAttribution:
author = self.tag_to_string(authorAttribution, use_alt=False)
feed = key if key is not None else 'Uncategorized'
if not articles.has_key(feed):
ans.append(feed)
articles[feed] = []
articles[feed].append(
feed = self.key if self.key is not None else 'Uncategorized'
if not self.articles.has_key(feed):
self.ans.append(feed)
self.articles[feed] = []
self.articles[feed].append(
dict(title=title, url=url, date=pubdate,
description=description, author=author,
content=''))
def parse_web_edition(self):
for (sec_title,index_url) in self.web_sections:
if self.includeSections != []:
if sec_title not in self.includeSections:
print "SECTION NOT INCLUDED: ",sec_title
continue
if sec_title in self.excludeSections:
print "SECTION EXCLUDED: ",sec_title
continue
print 'Index URL: '+'http://www.nytimes.com/pages/'+index_url+'/index.html'
soup = self.index_to_soup('http://www.nytimes.com/pages/'+index_url+'/index.html')
self.key = sec_title
# Find each article
for div in soup.findAll(True,
attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}):
if div['class'] in ['story', 'story headline'] :
self.handle_article(div)
elif div['class'] == 'headlinesOnly multiline flush':
for lidiv in div.findAll('li'):
self.handle_article(lidiv)
self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)]
return self.filter_ans(self.ans)
def parse_todays_index(self):
soup = self.index_to_soup('http://www.nytimes.com/pages/todayspaper/index.html')
skipping = False
# Find each article
for div in soup.findAll(True,
attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}):
if div['class'] in ['section-headline','sectionHeader']:
key = string.capwords(feed_title(div))
key = key.replace('Op-ed','Op-Ed')
key = key.replace('U.s.','U.S.')
self.key = string.capwords(self.feed_title(div))
self.key = self.key.replace('Op-ed','Op-Ed')
self.key = self.key.replace('U.s.','U.S.')
self.key = self.key.replace('N.y.','N.Y.')
skipping = False
if self.includeSections != []:
if self.key not in self.includeSections:
print "SECTION NOT INCLUDED: ",self.key
skipping = True
if self.key in self.excludeSections:
print "SECTION EXCLUDED: ",self.key
skipping = True
elif div['class'] in ['story', 'story headline'] :
handle_article(div)
if not skipping:
self.handle_article(div)
elif div['class'] == 'headlinesOnly multiline flush':
for lidiv in div.findAll('li'):
handle_article(lidiv)
if not skipping:
self.handle_article(lidiv)
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return self.filter_ans(ans)
self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)]
return self.filter_ans(self.ans)
def parse_headline_index(self):
articles = {}
ans = []
url_list = []
soup = self.index_to_soup('http://www.nytimes.com/pages/todaysheadlines/')
# Fetch the content table
@@ -363,15 +482,24 @@ class NYTimes(BasicNewsRecipe):
for td_col in content_table.findAll('td', {'id' : re.compile('Column')}):
for div_sec in td_col.findAll('div',recursive=False):
for h6_sec_name in div_sec.findAll('h6',{'style' : re.compile('text-transform: *uppercase')}):
section_name = self.tag_to_string(h6_sec_name,use_alt=False)
section_name = re.sub(r'^ *$','',section_name)
if section_name == '':
continue
if self.includeSections != []:
if section_name not in self.includeSections:
print "SECTION NOT INCLUDED: ",section_name
continue
if section_name in self.excludeSections:
print "SECTION EXCLUDED: ",section_name
continue
section_name=string.capwords(section_name)
if section_name == 'U.s.':
section_name = 'U.S.'
elif section_name == 'Op-ed':
section_name = 'Op-Ed'
section_name = section_name.replace('Op-ed','Op-Ed')
section_name = section_name.replace('U.s.','U.S.')
section_name = section_name.replace('N.y.','N.Y.')
pubdate = strftime('%a, %d %b')
search_div = div_sec
@@ -392,37 +520,32 @@ class NYTimes(BasicNewsRecipe):
if not a:
continue
url = re.sub(r'\?.*', '', a['href'])
if not url.startswith("http"):
continue
if not url.endswith(".html"):
continue
if 'podcast' in url:
continue
if 'video' in url:
if self.exclude_url(url):
continue
url += '?pagewanted=all'
if url in url_list:
if self.filterDuplicates:
if url in self.url_list:
continue
url_list.append(url)
self.log("URL %s" % url)
self.url_list.append(url)
title = self.tag_to_string(a, use_alt=True).strip()
desc = h3_item.find('p')
if desc is not None:
description = self.tag_to_string(desc,use_alt=False)
else:
description = ''
if not articles.has_key(section_name):
ans.append(section_name)
articles[section_name] = []
articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content=''))
if not self.articles.has_key(section_name):
self.ans.append(section_name)
self.articles[section_name] = []
self.articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content=''))
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return self.filter_ans(ans)
self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)]
return self.filter_ans(self.ans)
def parse_index(self):
if self.headlinesOnly:
return self.parse_headline_index()
elif self.webEdition:
return self.parse_web_edition()
else:
return self.parse_todays_index()
@@ -438,6 +561,21 @@ class NYTimes(BasicNewsRecipe):
def preprocess_html(self, soup):
if self.webEdition and (self.oldest_article > 0):
date_tag = soup.find(True,attrs={'class': ['dateline','date']})
if date_tag:
date_str = self.tag_to_string(date_tag,use_alt=False)
date_str = date_str.replace('Published:','')
date_items = date_str.split(',')
try:
datestring = date_items[0]+' '+date_items[1]
article_date = self.decode_us_date(datestring)
except:
article_date = date.today()
if article_date < self.earliest_date:
self.log("Skipping article dated %s" % date_str)
return None
kicker_tag = soup.find(attrs={'class':'kicker'})
if kicker_tag: # remove Op-Ed author head shots
tagline = self.tag_to_string(kicker_tag)
@@ -462,7 +600,6 @@ class NYTimes(BasicNewsRecipe):
for inlineImg in inlineImgs[1:]:
inlineImg.extract()
# Move firstImg before article body
#article_body = soup.find(True, {'id':'articleBody'})
cgFirst = soup.find(True, {'class':re.compile('columnGroup *first')})
if cgFirst:
# Strip all sibling NavigableStrings: noise
@@ -548,4 +685,3 @@ class NYTimes(BasicNewsRecipe):
divTag.replaceWith(tag)
return soup
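
The bulk of this change turns the recipe's behaviour into class-level configuration (headlinesOnly, webEdition, oldest_article, includeSections/excludeSections, filterDuplicates). A minimal sketch of how a user could build on those attributes; the subclass and section choices are illustrative, not part of this commit:

# Hypothetical customization of the NYTimes recipe above; names are examples only.
class NYTimesWebSports(NYTimes):
    headlinesOnly = False
    webEdition = True              # crawl the Web-edition section indexes
    oldest_article = 2             # skip articles older than two days
    includeSections = ['Sports', 'Science']  # only these web_sections titles
    excludeSections = []
    filterDuplicates = True        # download only the first copy of each URL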

View File

@@ -7,14 +7,22 @@ nytimes.com
'''
import re, string, time
from calibre import entity_to_unicode, strftime
from datetime import timedelta, date
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, BeautifulStoneSoup
class NYTimes(BasicNewsRecipe):
# set headlinesOnly to True for the headlines-only version
# set headlinesOnly to True for the headlines-only version. If True, webEdition is ignored.
headlinesOnly = False
# set webEdition to True for the Web edition of the newspaper. Set oldest_article to the
# number of days old an article can be for inclusion. If oldest_article = 0 all articles
# will be included. Note: oldest_article is ignored if webEdition = False
webEdition = False
oldest_article = 7
# includeSections: List of sections to include. If empty, all sections found will be included.
# Otherwise, only the sections named will be included. For example,
#
@@ -39,20 +47,76 @@ class NYTimes(BasicNewsRecipe):
# from an article (if one exists). If one_picture_per_article = True, the image
# will be moved to a location between the headline and the byline.
# If one_picture_per_article = False, all images from the article will be included
# and shown in their original location.
one_picture_per_article = True
one_picture_per_article = False
# The maximum number of articles that will be downloaded
max_articles_per_feed = 100
# Whether to omit duplicates of articles (typically arising when articles are indexed in
# more than one section). If True, only the first occurrence will be downloaded.
filterDuplicates = True
# Sections to collect for the Web edition.
# Delete any you don't want, or use includeSections or excludeSections
web_sections = [(u'World',u'world'),
(u'U.S.',u'national'),
(u'Politics',u'politics'),
(u'New York',u'nyregion'),
(u'Business','business'),
(u'Technology',u'technology'),
(u'Sports',u'sports'),
(u'Science',u'science'),
(u'Health',u'health'),
(u'Opinion',u'opinion'),
(u'Arts',u'arts'),
(u'Books',u'books'),
(u'Movies',u'movies'),
(u'Music',u'arts/music'),
(u'Television',u'arts/television'),
(u'Style',u'style'),
(u'Dining & Wine',u'dining'),
(u'Fashion & Style',u'fashion'),
(u'Home & Garden',u'garden'),
(u'Travel',u'travel'),
('Education',u'education'),
('Multimedia',u'multimedia'),
(u'Obituaries',u'obituaries'),
(u'Sunday Magazine',u'magazine'),
(u'Week in Review',u'weekinreview')]
if headlinesOnly:
title='New York Times Headlines'
description = 'Headlines from the New York Times'
needs_subscription = False
elif webEdition:
title='New York Times (Web)'
description = 'New York Times on the Web'
needs_subscription = True
else:
title='New York Times'
description = 'Today\'s New York Times'
needs_subscription = True
month_list = ['january','february','march','april','may','june','july','august','september','october','november','december']
def decode_us_date(self,datestr):
udate = datestr.strip().lower().split()
try:
m = self.month_list.index(udate[0])+1
except:
return date.today()
d = int(udate[1])
y = int(udate[2])
try:
d = date(y,m,d)
except:
d = date.today()
return d
earliest_date = date.today() - timedelta(days=oldest_article)
__author__ = 'GRiker/Kovid Goyal/Nick Redding'
language = 'en'
@@ -60,7 +124,6 @@ class NYTimes(BasicNewsRecipe):
timefmt = ''
needs_subscription = True
masthead_url = 'http://graphics8.nytimes.com/images/misc/nytlogo379x64.gif'
cover_margins = (18,18,'grey99')
@@ -137,6 +200,12 @@ class NYTimes(BasicNewsRecipe):
.image {text-align: center;}
.source {text-align: left; }'''
articles = {}
key = None
ans = []
url_list = []
def filter_ans(self, ans) :
total_article_count = 0
idx = 0
@@ -165,6 +234,29 @@ class NYTimes(BasicNewsRecipe):
self.log( "Queued %d articles" % total_article_count )
return ans
def exclude_url(self,url):
if not url.startswith("http"):
return True
if not url.endswith(".html"):
return True
if 'nytimes.com' not in url:
return True
if 'podcast' in url:
return True
if '/video/' in url:
return True
if '/slideshow/' in url:
return True
if '/magazine/index' in url:
return True
if '/interactive/' in url:
return True
if '/reference/' in url:
return True
if '/premium/' in url:
return True
return False
def fixChars(self,string):
# Replace lsquo (\x91)
fixed = re.sub("\x91","",string)
@@ -250,7 +342,6 @@ class NYTimes(BasicNewsRecipe):
return BeautifulSoup(_raw, markupMassage=massage)
# Entry point
print "index_to_soup()"
soup = get_the_soup( self.encoding, url_or_raw )
contentType = soup.find(True,attrs={'http-equiv':'Content-Type'})
docEncoding = str(contentType)[str(contentType).find('charset=') + len('charset='):str(contentType).rfind('"')]
@@ -274,33 +365,24 @@ class NYTimes(BasicNewsRecipe):
else:
return description
def parse_todays_index(self):
def feed_title(div):
def feed_title(self,div):
return ''.join(div.findAll(text=True, recursive=True)).strip()
articles = {}
key = None
ans = []
url_list = []
def handle_article(div):
def handle_article(self,div):
thumbnail = div.find('div','thumbnail')
if thumbnail:
thumbnail.extract()
a = div.find('a', href=True)
if not a:
return
url = re.sub(r'\?.*', '', a['href'])
if not url.startswith("http"):
return
if not url.endswith(".html"):
return
if 'podcast' in url:
return
if '/video/' in url:
if self.exclude_url(url):
return
url += '?pagewanted=all'
if url in url_list:
if self.filterDuplicates:
if url in self.url_list:
return
url_list.append(url)
self.url_list.append(url)
title = self.tag_to_string(a, use_alt=True).strip()
description = ''
pubdate = strftime('%a, %d %b')
@@ -315,42 +397,78 @@ class NYTimes(BasicNewsRecipe):
authorAttribution = div.find(True, attrs={'class':'byline'})
if authorAttribution:
author = self.tag_to_string(authorAttribution, use_alt=False)
feed = key if key is not None else 'Uncategorized'
if not articles.has_key(feed):
ans.append(feed)
articles[feed] = []
articles[feed].append(
feed = self.key if self.key is not None else 'Uncategorized'
if not self.articles.has_key(feed):
self.ans.append(feed)
self.articles[feed] = []
self.articles[feed].append(
dict(title=title, url=url, date=pubdate,
description=description, author=author,
content=''))
def parse_web_edition(self):
for (sec_title,index_url) in self.web_sections:
if self.includeSections != []:
if sec_title not in self.includeSections:
print "SECTION NOT INCLUDED: ",sec_title
continue
if sec_title in self.excludeSections:
print "SECTION EXCLUDED: ",sec_title
continue
print 'Index URL: '+'http://www.nytimes.com/pages/'+index_url+'/index.html'
soup = self.index_to_soup('http://www.nytimes.com/pages/'+index_url+'/index.html')
self.key = sec_title
# Find each article
for div in soup.findAll(True,
attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}):
if div['class'] in ['story', 'story headline'] :
self.handle_article(div)
elif div['class'] == 'headlinesOnly multiline flush':
for lidiv in div.findAll('li'):
self.handle_article(lidiv)
self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)]
return self.filter_ans(self.ans)
def parse_todays_index(self):
soup = self.index_to_soup('http://www.nytimes.com/pages/todayspaper/index.html')
skipping = False
# Find each article
for div in soup.findAll(True,
attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}):
if div['class'] in ['section-headline','sectionHeader']:
key = string.capwords(feed_title(div))
key = key.replace('Op-ed','Op-Ed')
key = key.replace('U.s.','U.S.')
self.key = string.capwords(self.feed_title(div))
self.key = self.key.replace('Op-ed','Op-Ed')
self.key = self.key.replace('U.s.','U.S.')
self.key = self.key.replace('N.y.','N.Y.')
skipping = False
if self.includeSections != []:
if self.key not in self.includeSections:
print "SECTION NOT INCLUDED: ",self.key
skipping = True
if self.key in self.excludeSections:
print "SECTION EXCLUDED: ",self.key
skipping = True
elif div['class'] in ['story', 'story headline'] :
handle_article(div)
if not skipping:
self.handle_article(div)
elif div['class'] == 'headlinesOnly multiline flush':
for lidiv in div.findAll('li'):
handle_article(lidiv)
if not skipping:
self.handle_article(lidiv)
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return self.filter_ans(ans)
self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)]
return self.filter_ans(self.ans)
def parse_headline_index(self):
articles = {}
ans = []
url_list = []
soup = self.index_to_soup('http://www.nytimes.com/pages/todaysheadlines/')
# Fetch the content table
@@ -364,15 +482,24 @@ class NYTimes(BasicNewsRecipe):
for td_col in content_table.findAll('td', {'id' : re.compile('Column')}):
for div_sec in td_col.findAll('div',recursive=False):
for h6_sec_name in div_sec.findAll('h6',{'style' : re.compile('text-transform: *uppercase')}):
section_name = self.tag_to_string(h6_sec_name,use_alt=False)
section_name = re.sub(r'^ *$','',section_name)
if section_name == '':
continue
if self.includeSections != []:
if section_name not in self.includeSections:
print "SECTION NOT INCLUDED: ",section_name
continue
if section_name in self.excludeSections:
print "SECTION EXCLUDED: ",section_name
continue
section_name=string.capwords(section_name)
if section_name == 'U.s.':
section_name = 'U.S.'
elif section_name == 'Op-ed':
section_name = 'Op-Ed'
section_name = section_name.replace('Op-ed','Op-Ed')
section_name = section_name.replace('U.s.','U.S.')
section_name = section_name.replace('N.y.','N.Y.')
pubdate = strftime('%a, %d %b')
search_div = div_sec
@@ -393,37 +520,32 @@ class NYTimes(BasicNewsRecipe):
if not a:
continue
url = re.sub(r'\?.*', '', a['href'])
if not url.startswith("http"):
continue
if not url.endswith(".html"):
continue
if 'podcast' in url:
continue
if 'video' in url:
if self.exclude_url(url):
continue
url += '?pagewanted=all'
if url in url_list:
if self.filterDuplicates:
if url in self.url_list:
continue
url_list.append(url)
self.log("URL %s" % url)
self.url_list.append(url)
title = self.tag_to_string(a, use_alt=True).strip()
desc = h3_item.find('p')
if desc is not None:
description = self.tag_to_string(desc,use_alt=False)
else:
description = ''
if not articles.has_key(section_name):
ans.append(section_name)
articles[section_name] = []
articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content=''))
if not self.articles.has_key(section_name):
self.ans.append(section_name)
self.articles[section_name] = []
self.articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content=''))
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return self.filter_ans(ans)
self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)]
return self.filter_ans(self.ans)
def parse_index(self):
if self.headlinesOnly:
return self.parse_headline_index()
elif self.webEdition:
return self.parse_web_edition()
else:
return self.parse_todays_index()
@@ -439,6 +561,21 @@ class NYTimes(BasicNewsRecipe):
def preprocess_html(self, soup):
if self.webEdition and (self.oldest_article > 0):
date_tag = soup.find(True,attrs={'class': ['dateline','date']})
if date_tag:
date_str = self.tag_to_string(date_tag,use_alt=False)
date_str = date_str.replace('Published:','')
date_items = date_str.split(',')
try:
datestring = date_items[0]+' '+date_items[1]
article_date = self.decode_us_date(datestring)
except:
article_date = date.today()
if article_date < self.earliest_date:
self.log("Skipping article dated %s" % date_str)
return None
kicker_tag = soup.find(attrs={'class':'kicker'})
if kicker_tag: # remove Op-Ed author head shots
tagline = self.tag_to_string(kicker_tag)
@@ -463,7 +600,6 @@ class NYTimes(BasicNewsRecipe):
for inlineImg in inlineImgs[1:]:
inlineImg.extract()
# Move firstImg before article body
#article_body = soup.find(True, {'id':'articleBody'})
cgFirst = soup.find(True, {'class':re.compile('columnGroup *first')})
if cgFirst:
# Strip all sibling NavigableStrings: noise

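Both recipe files now funnel every candidate link through the shared exclude_url() helper instead of repeating ad-hoc checks at each call site. A standalone sketch of the same logic, with invented URLs, showing what gets skipped:

# Mirrors the exclude_url() method above; the URLs are made up.
def exclude_url(url):
    skip_fragments = ('podcast', '/video/', '/slideshow/', '/magazine/index',
                      '/interactive/', '/reference/', '/premium/')
    if not url.startswith("http") or not url.endswith(".html"):
        return True
    if 'nytimes.com' not in url:
        return True
    return any(frag in url for frag in skip_fragments)

print exclude_url('http://www.nytimes.com/2010/11/21/sports/hockey.html')   # False: kept
print exclude_url('http://www.nytimes.com/slideshow/2010/11/21/week.html')  # True: skipped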
View File

@@ -6,6 +6,7 @@ __copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
spiegel.de
'''
from time import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class Spiegel_ger(BasicNewsRecipe):
@@ -44,3 +45,6 @@ class Spiegel_ger(BasicNewsRecipe):
rmain, rsep, rrest = main.rpartition(',')
purl = rmain + ',druck-' + rrest + ',' + rest
return purl
def get_cover_url(self):
return 'http://wissen.spiegel.de/wissen/titel/SP/' + strftime("%Y/%W/%j/titel.jpg")
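
The new get_cover_url() derives the cover path from the current year, week number, and day of year. For example, a run on 21 Nov 2010 (the date of this commit) would request approximately:

from time import strftime
# strftime("%Y/%W/%j/titel.jpg") -> "2010/46/325/titel.jpg" on 2010-11-21
print 'http://wissen.spiegel.de/wissen/titel/SP/' + strftime("%Y/%W/%j/titel.jpg")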

View File

@@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1289990851(BasicNewsRecipe):
title = u'TSN'
oldest_article = 7
max_articles_per_feed = 50
language = 'en_CA'
__author__ = 'Nexus'
no_stylesheets = True
INDEX = 'http://tsn.ca/nhl/story/?id=nhl'
keep_only_tags = [dict(name='div', attrs={'id':['tsnColWrap']}),
dict(name='div', attrs={'id':['tsnStory']})]
remove_tags = [dict(name='div', attrs={'id':'tsnRelated'}),
dict(name='div', attrs={'class':'textSize'})]
def parse_index(self):
feeds = []
soup = self.index_to_soup(self.INDEX)
feed_parts = soup.findAll('div', attrs={'class': 'feature'})
for feed_part in feed_parts:
articles = []
if not feed_part.h2:
continue
feed_title = feed_part.h2.string
article_parts = feed_part.findAll('a')
for article_part in article_parts:
article_title = article_part.string
article_date = ''
article_url = 'http://tsn.ca/' + article_part['href']
articles.append({'title': article_title, 'url': article_url, 'description':'', 'date':article_date})
if articles:
feeds.append((feed_title, articles))
return feeds
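
For reference, parse_index() must return calibre's nested list-of-feeds structure; the TSN recipe above yields entries of this shape (values invented):

# [(feed_title, [article_dict, ...]), ...]
feeds = [
    (u'NHL', [
        {'title': u'Some headline', 'url': 'http://tsn.ca/nhl/story/?id=0',
         'description': '', 'date': ''},
    ]),
]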

View File

@@ -0,0 +1,63 @@
#!/usr/bin/env python
# -*- coding: utf-8; mode: python -*-
__license__ = 'GPL v3'
__copyright__ = '2010, Steffen Siebert <calibre at steffensiebert.de>'
__docformat__ = 'restructuredtext de'
__version__ = '1.1'
"""
Die Zeit EPUB
"""
import os, urllib2, zipfile, re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ptempfile import PersistentTemporaryFile
class ZeitEPUBAbo(BasicNewsRecipe):
title = u'Zeit Online Premium'
description = u'Das EPUB Abo der Zeit (needs subscription)'
language = 'de'
lang = 'de-DE'
__author__ = 'Steffen Siebert'
needs_subscription = True
conversion_options = {
'no_default_epub_cover' : True
}
def build_index(self):
domain = "http://premium.zeit.de"
url = domain + "/abovorteile/cgi-bin/_er_member/p4z.fpl?ER_Do=getUserData&ER_NextTemplate=login_ok"
browser = self.get_browser()
browser.add_password("http://premium.zeit.de", self.username, self.password)
try:
browser.open(url)
except urllib2.HTTPError:
self.report_progress(0,_("Can't login to download issue"))
raise ValueError('Failed to login, check your username and password')
response = browser.follow_link(text="DIE ZEIT als E-Paper")
response = browser.follow_link(url_regex=re.compile('^http://contentserver.hgv-online.de/nodrm/fulfillment\\?distributor=zeit-online&orderid=zeit_online.*'))
tmp = PersistentTemporaryFile(suffix='.epub')
self.report_progress(0,_('downloading epub'))
tmp.write(response.read())
tmp.close()
zfile = zipfile.ZipFile(tmp.name, 'r')
self.report_progress(0,_('extracting epub'))
zfile.extractall(self.output_dir)
tmp.close()
index = os.path.join(self.output_dir, 'content.opf')
self.report_progress(1,_('epub downloaded and extracted'))
return index

View File

@@ -678,6 +678,15 @@ class NookOutput(OutputProfile):
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class NookColorOutput(NookOutput):
name = 'Nook Color'
short_name = 'nook_color'
description = _('This profile is intended for the B&N Nook Color.')
screen_size = (600, 980)
comic_screen_size = (584, 980)
dpi = 169
class BambookOutput(OutputProfile):
author = 'Li Fanxi'
@@ -698,6 +707,6 @@ output_profiles = [OutputProfile, SonyReaderOutput, SonyReader300Output,
iPadOutput, KoboReaderOutput, TabletOutput,
SonyReaderLandscapeOutput, KindleDXOutput, IlliadOutput,
IRexDR1000Output, IRexDR800Output, JetBook5Output, NookOutput,
BambookOutput, ]
BambookOutput, NookColorOutput]
output_profiles.sort(cmp=lambda x,y:cmp(x.name.lower(), y.name.lower()))

View File

@@ -5,7 +5,7 @@ __license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import QIcon, QMenu
from PyQt4.Qt import QIcon, QMenu, Qt
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.preferences.main import Preferences
@@ -41,5 +41,7 @@ class PreferencesAction(InterfaceAction):
return
d = Preferences(self.gui, initial_plugin=initial_plugin)
d.show()
d.run_wizard_requested.connect(self.gui.run_wizard,
type=Qt.QueuedConnection)

View File

@@ -155,6 +155,8 @@ class Browser(QScrollArea): # {{{
class Preferences(QMainWindow):
run_wizard_requested = pyqtSignal()
def __init__(self, gui, initial_plugin=None):
QMainWindow.__init__(self, gui)
self.gui = gui
@@ -195,6 +197,11 @@ class Preferences(QMainWindow):
self.cw.setLayout(QVBoxLayout())
self.cw.layout().addWidget(self.stack)
self.bb = QDialogButtonBox(QDialogButtonBox.Close)
self.wizard_button = self.bb.addButton(_('Run welcome wizard'),
self.bb.DestructiveRole)
self.wizard_button.setIcon(QIcon(I('wizard.png')))
self.wizard_button.clicked.connect(self.run_wizard,
type=Qt.QueuedConnection)
self.cw.layout().addWidget(self.bb)
self.bb.rejected.connect(self.close, type=Qt.QueuedConnection)
self.setCentralWidget(self.cw)
@@ -240,6 +247,9 @@ class Preferences(QMainWindow):
if plugin is not None:
self.show_plugin(plugin)
def run_wizard(self):
self.close()
self.run_wizard_requested.emit()
def show_plugin(self, plugin):
self.showing_widget = plugin.create_widget(self.scroll_area)

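Together with the PreferencesAction change above, this wires up a small signal round-trip: clicking the new button closes the Preferences window and emits run_wizard_requested, which the action forwards to gui.run_wizard on a queued connection so the wizard starts only after the window has gone. A minimal sketch of the pattern (stand-in class, not calibre code):

from PyQt4.Qt import QObject, pyqtSignal

class Window(QObject):
    run_wizard_requested = pyqtSignal()
    def close(self):
        pass                              # stand-in for QMainWindow.close()
    def run_wizard(self):
        self.close()                      # hide the window first
        self.run_wizard_requested.emit()  # then let the owner start the wizard

w = Window()
w.run_wizard_requested.connect(lambda: None)  # owner's slot would go here
w.run_wizard()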
View File

@@ -38,6 +38,7 @@ class Device(object):
name = 'Default'
manufacturer = 'Default'
id = 'default'
supports_color = False
@classmethod
def set_output_profile(cls):
@@ -56,6 +57,12 @@ class Device(object):
def commit(cls):
cls.set_output_profile()
cls.set_output_format()
if cls.supports_color:
from calibre.ebooks.conversion.config import load_defaults, save_defaults
recs = load_defaults('comic_input')
recs['dont_grayscale'] = True
save_defaults('comic_input', recs)
class Kindle(Device):
@@ -138,6 +145,12 @@ class Nook(Sony505):
manufacturer = 'Barnes & Noble'
output_profile = 'nook'
class NookColor(Nook):
id = 'nook_color'
name = 'Nook Color'
output_profile = 'nook_color'
supports_color = True
class CybookG3(Device):
name = 'Cybook Gen 3'
@@ -178,6 +191,7 @@ class iPhone(Device):
output_format = 'EPUB'
manufacturer = 'Apple'
id = 'iphone'
supports_color = True
class Android(Device):
@@ -185,6 +199,7 @@ class Android(Device):
output_format = 'EPUB'
manufacturer = 'Android'
id = 'android'
supports_color = True
class HanlinV3(Device):
@@ -354,11 +369,6 @@ class StanzaPage(QWizardPage, StanzaUI):
return FinishPage.ID
def commit(self):
from calibre.ebooks.conversion.config import load_defaults, save_defaults
recs = load_defaults('comic_input')
recs['dont_grayscale'] = True
save_defaults('comic_input', recs)
p = self.set_port()
if p is not None:
from calibre.library.server import server_config

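Net effect of the supports_color flag: the comic-input grayscale default, previously forced in StanzaPage.commit(), is now applied per device when Device.commit() runs. Adding another color device becomes one small subclass; the name and profile below are hypothetical:

class SomeColorTablet(Device):
    id = 'some_color_tablet'
    name = 'Some Color Tablet'
    output_profile = 'tablet'   # assumed profile short name
    supports_color = True       # commit() will set dont_grayscale for comics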
View File

@@ -56,6 +56,7 @@ class cmd_commit(_cmd_commit):
summary = self.get_trac_summary(bug, url)
if summary:
msg = msg.replace('#%s'%bug, '#%s (%s)'%(bug, summary))
msg = msg.replace('Fixesed', 'Fixed')
return msg, bug, url, action

View File

@@ -4345,7 +4345,7 @@ msgid ""
"changed.<br><br>Please confirm you want to proceed."
msgstr ""
"Boekformaten en metagegevens van de selectie zullen toegevoegd worden aan "
"het <b>eerst geselecteerde boek</b>. ISBN zal <i>niet</i> samengevoegd "
"het <b>eerst geselecteerde boek</b> (%s). ISBN zal <i>niet</i> samengevoegd "
"worden.<br><br>De geselecteerde boeken zullen niet verwijderd of aangepast "
"worden.<br><br>Bevestig als je wilt doorgaan."
@@ -4360,7 +4360,7 @@ msgid ""
"you <b>sure</b> you want to proceed?"
msgstr ""
"Boekformaten en metagegevens van de selectie zullen toegevoegd worden aan "
"het <b>eerst geselecteerde boek</b>. ISBN zal <i>niet</i> samengevoegd "
"het <b>eerst geselecteerde boek</b> (%s). ISBN zal <i>niet</i> samengevoegd "
"worden.<br><br>Na samenvoeging zullen de geselecteerde boeken van je "
"computer <b>verwijderd</b> worden.<br><br>Weet je zeker dat je door wilt "
"gaan?"

View File

@@ -105,7 +105,10 @@ def sendmail(msg, from_, to, localhost=None, verbose=0, timeout=30,
try:
s.sendmail(from_, to, msg)
finally:
try:
ret = s.quit()
except:
pass # Ignore so as to not hide original error
return ret
def option_parser():

View File

@@ -61,8 +61,10 @@ def serialize_collection(mapping_of_recipe_classes):
def serialize_collection(mapping_of_recipe_classes):
collection = E.recipe_collection()
for urn, recipe_class in mapping_of_recipe_classes.items():
recipe = serialize_recipe(urn, recipe_class)
for urn in sorted(mapping_of_recipe_classes.keys(),
key=lambda key: getattr(mapping_of_recipe_classes[key], 'title',
'zzz')):
recipe = serialize_recipe(urn, mapping_of_recipe_classes[urn])
collection.append(recipe)
collection.set('count', str(len(collection)))
return etree.tostring(collection, encoding='utf-8', xml_declaration=True,
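
serialize_collection() now emits recipes in title order instead of arbitrary dict order, with 'zzz' as the fallback key so classes lacking a title sort last. The key function in isolation (stand-in classes, not calibre recipes):

class Alpha: title = 'Alpha news'
class Beta: title = 'Beta news'
class NoTitle: pass   # no title attribute -> sorts as 'zzz', i.e. last

mapping = {'urn:n': NoTitle, 'urn:b': Beta, 'urn:a': Alpha}
order = sorted(mapping.keys(),
               key=lambda key: getattr(mapping[key], 'title', 'zzz'))
print order   # ['urn:a', 'urn:b', 'urn:n']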