mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
Update Times of India
This commit is contained in:
parent
1651eedea8
commit
30b85e3dcb
@@ -1,74 +1,58 @@
|
|||||||
import re, urllib
|
# vim:fileencoding=utf-8
|
||||||
from calibre.web.feeds.news import BasicNewsRecipe
|
from calibre.web.feeds.news import BasicNewsRecipe
|
||||||
|
from lxml import html
|
||||||
|
|
||||||
|
# Only TOC sections named here make it into the generated e-book.
allowed_sections = {
    'Top Headlines', 'Opinion', 'Science', 'Education', 'US',
    'Pakistan', 'India Business', 'Tech News', 'Cricket', 'Bollywood',
}
|
||||||
|
|
||||||
class TimesOfIndia(BasicNewsRecipe):
    """Headline news from the Indian daily Times of India.

    Rather than pulling RSS feeds, parse_index() scrapes the site's
    headlines page and keeps only the sections listed in the
    module-level ``allowed_sections`` set.
    """
    title = u'Times of India Headlines'
    language = 'en'
    description = 'Headline news from the Indian daily Times of India'
    __author__ = 'Kovid Goyal'

    no_stylesheets = True
    no_javascript = True

    keep_only_tags = [dict(name='h1'), dict(id=['storydiv', 'contentarea'])]
    remove_tags = [
        dict(name='div', attrs={'class': [
            'video_list', 'rightpart', 'clearfix mTop15', 'footer_slider',
            'read_more', 'flR', 'hide_new']}),
        dict(name='div', attrs={'id': [
            'most_pop', 'relartstory', 'slidebox', 'tmpFbokk', 'twittersource',
            'reportAbuseDiv', 'result', 'yahoobuzzsyn', 'fb-root']}),
        dict(style='float:right;margin-left:5px;'),
    ]

    def parse_index(self):
        """Build the feed list by scraping the headlines TOC page.

        Returns a list of ``(section_title, [article_dict, ...])``
        tuples, restricted to sections in ``allowed_sections``.
        """
        index = 'http://timesofindia.indiatimes.com/home/headlines'
        raw = self.index_to_soup(index, raw=True)
        root = html.fromstring(raw)

        feeds = []
        current_section = None
        current_articles = []

        # The TOC lives in a centred table: each <h3> names a section and
        # the <ul class="content"> that follows lists that section's links.
        toc = root.xpath('//div[@align="center"]/descendant::table[@class="cnt"]')[0]
        for x in toc.xpath('descendant::*[name()="h3" or (name()="ul" and @class="content")]'):
            if x.tag == 'h3':
                # New section heading: flush the previous one if wanted.
                if current_articles and current_section in allowed_sections:
                    feeds.append((current_section, current_articles))
                # NOTE: ``unicode`` — this recipe targets Python 2.
                current_section = html.tostring(x, method='text', encoding=unicode).strip()
                current_articles = []
                self.log(current_section)
            else:
                for a in x.xpath('descendant::li/descendant::a[@href]'):
                    title = html.tostring(a, method='text', encoding=unicode).strip()
                    url = a.get('href')
                    if url.startswith('/'):
                        # Site-relative link: make it absolute.
                        url = 'http://timesofindia.indiatimes.com' + url
                    self.log(' ', title)
                    current_articles.append({'title': title, 'url': url})
                self.log('')

        # Flush the final section.
        if current_articles and current_section in allowed_sections:
            feeds.append((current_section, current_articles))
        return feeds
|
||||||
def get_article_url(self, article):
|
|
||||||
try:
|
|
||||||
s = article.summary
|
|
||||||
return urllib.unquote(
|
|
||||||
re.search(r'href=".+?bookmark.cfm.+?link=(.+?)"', s).group(1))
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
link = article.get('link', None)
|
|
||||||
if link and link.split('/')[-1]=="story01.htm":
|
|
||||||
link=link.split('/')[-2]
|
|
||||||
encoding = {'0B': '.', '0C': '/', '0A': '0', '0F': '=', '0G': '&',
|
|
||||||
'0D': '?', '0E': '-', '0N': '.com', '0L': 'http://'}
|
|
||||||
for k, v in encoding.iteritems():
|
|
||||||
link = link.replace(k, v)
|
|
||||||
return link
|
|
||||||
|
|
||||||
def print_version(self, url):
|
|
||||||
return url + '?prtpage=1'
|
|
||||||
|
|
||||||
def preprocess_html(self, soup, *args):
|
|
||||||
byl = soup.find(attrs={'class':'byline'})
|
|
||||||
if byl is not None:
|
|
||||||
for l in byl.findAll('label'):
|
|
||||||
l.extract()
|
|
||||||
return soup
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user