Kovid Goyal 2012-04-19 10:34:12 +05:30
parent c85d2df15f
commit 5c0cd6e070


@@ -1,50 +1,40 @@
-'''
-readitlaterlist.com
-'''
+"""
+Pocket Calibre Recipe v1.0
+"""
 __license__ = 'GPL v3'
 __copyright__ = '''
 2010, Darko Miletic <darko.miletic at gmail.com>
 2011, Przemyslaw Kryger <pkryger at gmail.com>
-2011, Keith Callenberg <keithcallenberg@gmail.com>
 2012, tBunnyMan <Wag That Tail At Me dot com>
-2012, Alayn Gortazar <zutoin at gmail dot com>
 '''
-from contextlib import closing
+from calibre import strftime, preferred_encoding
 from calibre.web.feeds.news import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import Tag
-import json
-import urllib
-import urllib2
 
-class Readitlater(BasicNewsRecipe):
-    title = 'Read It Later'
-    __author__ = 'Darko Miletic, Przemyslaw Kryger, Keith Callenberg, tBunnyMan, Alayn Gortazar'
-    description = '''Personalized news feeds. Go to readitlaterlist.com to
-                     setup up your news. Fill in your account
-                     username, and optionally you can add your password.'''
-    publisher = 'readitlaterlist.com'
+class Pocket(BasicNewsRecipe):
+    title = 'Pocket'
+    __author__ = 'Darko Miletic, Przemyslaw Kryger, Keith Callenberg, tBunnyMan'
+    description = '''Personalized news feeds. Go to getpocket.com to set up \
+                     your news. This version displays pages of articles from \
+                     oldest to newest, with max & minimum counts, and marks articles \
+                     read after downloading.'''
+    publisher = 'getpocket.com'
     category = 'news, custom'
     oldest_article = 7
     max_articles_per_feed = 50
-    minimum_articles = 1
+    minimum_articles = 10
+    mark_as_read_after_dl = True
     no_stylesheets = True
     use_embedded_content = False
     needs_subscription = True
-    KEY = '8e0p5f19A74emL3a47goP87m69d4VF8b'
-    API_TEXT_INDEX = 'https://text.readitlaterlist.com/'
-    API_INDEX = 'https://readitlaterlist.com/'
-    INDEX = 'https://getpocket.com/'
+    INDEX = u'http://getpocket.com'
     LOGIN = INDEX + u'/l'
-    enhanced_version = True
-    articles = []
-    feeds = [(u'Unread articles' , INDEX)]
 
+    readList = []
+
     def get_browser(self):
         br = BasicNewsRecipe.get_browser()
-        if self.enhanced_version:
-            if self.username is not None:
-                br.open(self.LOGIN)
-                br.select_form(nr=0)
+        if self.username is not None:
+            br.open(self.LOGIN)
+            br.select_form(nr=0)
@@ -54,102 +44,88 @@ class Readitlater(BasicNewsRecipe):
             br.submit()
         return br
 
-    def get_auth_params(self):
-        auth_params = 'apikey=' + self.KEY
-        if self.username is not None:
-            auth_params += '&username=' + self.username
-        if self.password is not None:
-            auth_params += '&password=' + self.password
-        return auth_params
+    def get_feeds(self):
+        self.report_progress(0, ('Fetching list of pages...'))
+        lfeeds = []
+        i = 1
+        feedurl = self.INDEX + u'/unread/1'
+        while True:
+            title = u'Unread articles, page ' + str(i)
+            lfeeds.insert(0, (title, feedurl))
+            self.report_progress(0, ('Got ') + str(i) + (' pages'))
+            i += 1
+            soup = self.index_to_soup(feedurl)
+            ritem = soup.find('a', attrs={'id':'next', 'class':'active'})
+            if ritem is None:
+                break
+            feedurl = self.INDEX + ritem['href']
+        return lfeeds
 
     def parse_index(self):
-        index = self.API_INDEX + 'v2/get?' + self.get_auth_params()
-        index += '&state=unread'
-        index += '&count=' + str(self.max_articles_per_feed)
-
-        open_func = getattr(self.browser, 'open_novisit', self.browser.open)
-        with closing(open_func(index)) as f:
-            results = f.read()
-            if not results:
-                raise RuntimeError('Could not fetch index!')
-
-        json_obj = json.loads(results)
-
-        if len(json_obj['list']) >= self.minimum_articles:
-            for item in json_obj['list'].iteritems():
-                # TODO: This URL should be modified by it's corresponding API call in a future.
-                # Actually is not possible to get the Article View potential throught an API call (12/04/2012)
-                if self.enhanced_version:
-                    dataurl = self.INDEX + 'a/x/getArticle.php?itemId=' + item[1]['item_id']
-                else:
-                    dataurl = self.API_TEXT_INDEX + 'v2/text?' + self.get_auth_params()
-                    dataurl += '&url=' + item[1]['url']
-                self.articles.append({
-                    'title':item[1]['title'],
-                    'date':item[1]['time_added'],
-                    'url':dataurl,
-                    'description':item[1]['item_id'],
-                    'real_url':item[1]['url']
-                })
-        else:
-            raise Exception("Not enough articles in RIL! Change minimum_articles or add more.")
-
-        return [('Unread', self.articles)]
-
-    def preprocess_raw_html(self, raw_html, url):
-        # get article and image urls from json object
-        if self.enhanced_version:
-            json_obj = json.loads(raw_html)
-            self.images = {}
-            for image in json_obj['article']['images']:
-                self.images[image] = json_obj['article']['images'][image]['src']
-            title = '<h1>{title}</h1>'.format(title=json_obj['article']['title'])
-            link = '<p>Original: <a href="{url}">{url}</a></p>'.format(url=json_obj['article']['resolvedUrl'])
-            html = link + title + json_obj['article']['article']
-        else:
-            html = raw_html
-        return html + '<hr />'
-
-    def preprocess_html(self, soup):
-        # Insert images on RIL_IMG_# divs
-        if self.enhanced_version:
-            for key, url in self.images.iteritems():
-                imgtag = Tag(soup, 'img')
-                imgtag['src'] = url
-                div = soup.find('div', attrs={'id':'RIL_IMG_' + key})
-                div.insert(0, imgtag)
-        return soup
+        totalfeeds = []
+        articlesToGrab = self.max_articles_per_feed
+        lfeeds = self.get_feeds()
+        for feedobj in lfeeds:
+            if articlesToGrab < 1:
+                break
+            feedtitle, feedurl = feedobj
+            self.report_progress(0, ('Fetching feed') + ' %s...' % (feedtitle if feedtitle else feedurl))
+            articles = []
+            soup = self.index_to_soup(feedurl)
+            ritem = soup.find('ul', attrs={'id':'list'})
+            for item in reversed(ritem.findAll('li')):
+                if articlesToGrab < 1:
+                    break
+                else:
+                    articlesToGrab -= 1
+                description = ''
+                atag = item.find('a', attrs={'class':'text'})
+                if atag and atag.has_key('href'):
+                    url = self.INDEX + atag['href']
+                    title = self.tag_to_string(item.div)
+                    date = strftime(self.timefmt)
+                    articles.append({
+                        'title'      :title,
+                        'date'       :date,
+                        'url'        :url,
+                        'description':description
+                    })
+                    readLink = item.find('a', attrs={'class':'check'})['href']
+                    self.readList.append(readLink)
+            totalfeeds.append((feedtitle, articles))
+        if len(self.readList) < self.minimum_articles:
+            raise Exception("Not enough articles in RIL! Change minimum_articles or add more.")
+        return totalfeeds
+
+    def mark_as_read(self, markList):
+        br = self.get_browser()
+        for link in markList:
+            url = self.INDEX + link
+            print 'Marking read: ', url
+            response = br.open(url)
+            print response.info()
 
     def cleanup(self):
-        # From a list of urls, create a human-readable JSON string
-        # suitable for passing to the ReadItLater SEND::READ method.
-        self.markAsRead(self.createMarkList(self.articles))
-
-    def createMarkList(self, articles):
-        urls = []
-        for article in self.articles:
-            urls.append(article['real_url'])
-        items = ['"%d": {"url": "%s"}' % (n, u) for n, u in enumerate(urls)]
-        s = '{\n %s\n}' % (',\n '.join(items),)
-        return s
-
-    def markAsRead(self, markList):
-        url = self.API_INDEX + 'v2/send'
-        values = {
-            'username' : self.username,
-            'password' : self.password,
-            'apikey' : self.KEY,
-            'read' : markList
-        }
-        data = urllib.urlencode(values)
-        try:
-            print 'Calling ReadItLater API...'
-            request = urllib2.Request(url, data)
-            response = urllib2.urlopen(request)
-            response.read()
-            print 'response =', response.code
-        except urllib2.HTTPError as e:
-            print 'The server could not fulfill the request: ', e
-        except urllib2.URLError as e:
-            print 'The call to ReadItLater API failed:', e
+        if self.mark_as_read_after_dl:
+            self.mark_as_read(self.readList)
+        else:
+            pass
+
+    def default_cover(self, cover_file):
+        '''
+        Create a generic cover for recipes that don't have a cover
+        This override adds time to the cover
+        '''
+        try:
+            from calibre.ebooks import calibre_cover
+            title = self.title if isinstance(self.title, unicode) else \
+                    self.title.decode(preferred_encoding, 'replace')
+            date = strftime(self.timefmt)
+            time = strftime('[%I:%M %p]')
+            img_data = calibre_cover(title, date, time)
+            cover_file.write(img_data)
+            cover_file.flush()
+        except:
+            self.log.exception('Failed to generate default cover')
+            return False
+        return True
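
The heart of this rewrite is that the recipe no longer calls the Read It Later JSON API: get_feeds() walks Pocket's paginated web UI by following the active "next" link starting from /unread/1, parse_index() scrapes each page's ul#list for article links, and every item's "mark read" link is queued in readList so cleanup() can open each one through the logged-in browser after the download finishes. A minimal standalone sketch of that pagination pattern follows; the function name, the injected fetch callable, and the regex are illustrative assumptions, since the recipe itself uses calibre's index_to_soup() and BeautifulSoup rather than regex matching:

    import re

    INDEX = 'http://getpocket.com'

    def collect_unread_pages(fetch):
        """Sketch of Pocket.get_feeds(); fetch(url) returns an HTML string."""
        pages = []
        feedurl = INDEX + '/unread/1'
        while True:
            # Prepend, so the deepest (oldest) page ends up first; combined
            # with reversed(...findAll('li')) this yields oldest-to-newest.
            pages.insert(0, feedurl)
            html = fetch(feedurl)
            # Stand-in for soup.find('a', attrs={'id':'next', 'class':'active'})
            m = re.search(r'id="next"[^>]*class="active"[^>]*href="([^"]+)"', html)
            if m is None:
                break  # no active "next" link: this was the last page
            feedurl = INDEX + m.group(1)
        return pages

To exercise the recipe end to end, calibre can build it straight from a saved recipe file, along the lines of: ebook-convert pocket.recipe pocket.epub --username you@example.com --password secret (the filename here is assumed; --username/--password supply the needs_subscription login).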