Improved Read It Later

This commit is contained in:
Kovid Goyal 2012-02-04 10:27:26 +05:30
parent b65b295dd9
commit 9c9ed534e3

View File

@ -1,30 +1,36 @@
"""
readitlaterlist.com
"""
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = ''' __copyright__ = '''
2010, Darko Miletic <darko.miletic at gmail.com> 2010, Darko Miletic <darko.miletic at gmail.com>
2011, Przemyslaw Kryger <pkryger at gmail.com> 2011, Przemyslaw Kryger <pkryger at gmail.com>
''' 2012, tBunnyMan <Wag That Tail At Me dot com>
'''
readitlaterlist.com
''' '''
from calibre import strftime from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Readitlater(BasicNewsRecipe): class Readitlater(BasicNewsRecipe):
title = 'Read It Later' title = 'ReadItLater'
__author__ = 'Darko Miletic, Przemyslaw Kryger' __author__ = 'Darko Miletic, Przemyslaw Kryger, Keith Callenberg, tBunnyMan'
description = '''Personalized news feeds. Go to readitlaterlist.com to description = '''Personalized news feeds. Go to readitlaterlist.com to setup \
setup up your news. Fill in your account up your news. This version displays pages of articles from \
username, and optionally you can add password.''' oldest to newest, with max & minimum counts, and marks articles \
publisher = 'readitlater.com' read after downloading.'''
publisher = 'readitlaterlist.com'
category = 'news, custom' category = 'news, custom'
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 50
minimum_articles = 1
no_stylesheets = True no_stylesheets = True
use_embedded_content = False use_embedded_content = False
needs_subscription = True needs_subscription = True
INDEX = u'http://readitlaterlist.com' INDEX = u'http://readitlaterlist.com'
LOGIN = INDEX + u'/l' LOGIN = INDEX + u'/l'
readList = []
def get_browser(self): def get_browser(self):
br = BasicNewsRecipe.get_browser() br = BasicNewsRecipe.get_browser()
@ -33,41 +39,46 @@ class Readitlater(BasicNewsRecipe):
br.select_form(nr=0) br.select_form(nr=0)
br['feed_id'] = self.username br['feed_id'] = self.username
if self.password is not None: if self.password is not None:
br['password'] = self.password br['password'] = self.password
br.submit() br.submit()
return br return br
def get_feeds(self):
    """Discover the paginated 'unread' list and return (title, url) pairs.

    Pages are prepended as they are found, so the returned list runs from
    the oldest page to the newest. When ``self.test`` is set, only two
    pages are returned to keep test downloads small.
    """
    self.report_progress(0, ('Fetching list of pages...'))
    pages = []
    page_num = 1
    page_url = self.INDEX + u'/unread/1'
    while True:
        # Prepend so older pages come first in the final feed order.
        label = u'Unread articles, page ' + str(page_num)
        pages.insert(0, (label, page_url))
        self.report_progress(0, ('Got ') + str(page_num) + (' pages'))
        page_num += 1
        soup = self.index_to_soup(page_url)
        # The 'next' anchor is only active while more pages remain.
        next_link = soup.find('a', attrs={'id':'next', 'class':'active'})
        if next_link is None:
            break
        page_url = self.INDEX + next_link['href']
    if self.test:
        return pages[:2]
    return pages
def parse_index(self): def parse_index(self):
totalfeeds = [] totalfeeds = []
articlesToGrab = self.max_articles_per_feed
lfeeds = self.get_feeds() lfeeds = self.get_feeds()
for feedobj in lfeeds: for feedobj in lfeeds:
if articlesToGrab < 1:
break
feedtitle, feedurl = feedobj feedtitle, feedurl = feedobj
self.report_progress(0, ('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl)) self.report_progress(0, ('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
articles = [] articles = []
soup = self.index_to_soup(feedurl) soup = self.index_to_soup(feedurl)
ritem = soup.find('ul',attrs={'id':'list'}) ritem = soup.find('ul', attrs={'id':'list'})
for item in ritem.findAll('li'): for item in reversed(ritem.findAll('li')):
if articlesToGrab < 1:
break
else:
articlesToGrab -= 1
description = '' description = ''
atag = item.find('a',attrs={'class':'text'}) atag = item.find('a', attrs={'class':'text'})
if atag and atag.has_key('href'): if atag and atag.has_key('href'):
url = self.INDEX + atag['href'] url = self.INDEX + atag['href']
title = self.tag_to_string(item.div) title = self.tag_to_string(item.div)
@ -78,6 +89,20 @@ class Readitlater(BasicNewsRecipe):
,'url' :url ,'url' :url
,'description':description ,'description':description
}) })
readLink = item.find('a', attrs={'class':'check'})['href']
self.readList.append(readLink)
totalfeeds.append((feedtitle, articles)) totalfeeds.append((feedtitle, articles))
if len(self.readList) < self.minimum_articles:
raise Exception("Not enough articles in RIL! Change minimum_articles or add more.")
return totalfeeds return totalfeeds
def mark_as_read(self, markList):
    """Mark downloaded articles as read on readitlaterlist.com.

    markList: iterable of site-relative hrefs (the 'check' links collected
    while parsing the index). Opening each URL with a logged-in browser is
    the action itself; the response body is irrelevant and is discarded.
    """
    br = self.get_browser()
    for link in markList:
        # Fix: the original bound the response to an unused variable and
        # followed it with a no-op bare-expression statement.
        br.open(self.INDEX + link)
def cleanup(self):
    """Post-download hook: flag every fetched article as read on the site."""
    self.mark_as_read(self.readList)