Update indian_express.recipe

Switched to RSS feeds.
This commit is contained in:
unkn0w7n 2025-08-02 19:08:13 +05:30
parent ab89056f7e
commit da640add79

View File

@@ -11,7 +11,6 @@ class IndianExpress(BasicNewsRecipe):
language = 'en_IN' language = 'en_IN'
__author__ = 'unkn0wn' __author__ = 'unkn0wn'
oldest_article = 1.15 # days oldest_article = 1.15 # days
max_articles_per_feed = 25
encoding = 'utf-8' encoding = 'utf-8'
masthead_url = 'https://indianexpress.com/wp-content/themes/indianexpress/images/indian-express-logo-n.svg' masthead_url = 'https://indianexpress.com/wp-content/themes/indianexpress/images/indian-express-logo-n.svg'
no_stylesheets = True no_stylesheets = True
@@ -49,84 +48,118 @@ class IndianExpress(BasicNewsRecipe):
) )
] ]
def parse_index(self): recipe_specific_options = {
'days': {
'short': 'Oldest article to download from this news source. In days ',
'long': 'For example, 0.5, gives you articles from the past 12 hours',
'default': str(oldest_article),
},
'res': {
'short': 'For hi-res images, select a resolution from the\nfollowing options: 400, 800, 1200, 1600',
'long': 'This is useful for non e-ink devices.',
'default': '600',
},
}
section_list = [ def __init__(self, *args, **kwargs):
('Daily Briefing', 'https://indianexpress.com/section/live-news/'), BasicNewsRecipe.__init__(self, *args, **kwargs)
('Front Page', 'https://indianexpress.com/print/front-page/'), d = self.recipe_specific_options.get('days')
('India', 'https://indianexpress.com/section/india/'), if d and isinstance(d, str):
# ('Express Network', 'https://indianexpress.com/print/express-network/'), self.oldest_article = float(d)
('Delhi Confidential', 'https://indianexpress.com/section/delhi-confidential/'),
('Editorials', 'https://indianexpress.com/section/opinion/editorials/'), feeds = [
('Columns', 'https://indianexpress.com/section/opinion/columns/'), 'https://indianexpress.com/section/opinion/feed',
('UPSC-CSE Key', 'https://indianexpress.com/section/upsc-current-affairs/'), 'https://indianexpress.com/section/delhi-confidential/feed',
('Explained', 'https://indianexpress.com/section/explained/'), 'https://indianexpress.com/section/india/feed',
('Business', 'https://indianexpress.com/section/business/'), 'https://indianexpress.com/section/political-pulse/feed',
# ('Political Pulse', 'https://indianexpress.com/section/political-pulse/'), 'https://indianexpress.com/section/explained/feed',
('Sunday Eye', 'https://indianexpress.com/section/express-sunday-eye/'), 'https://indianexpress.com/section/business/feed/',
('World', 'https://indianexpress.com/section/world/'), 'https://indianexpress.com/section/upsc-current-affairs/feed',
# ('Education', 'https://indianexpress.com/section/education/'), 'https://indianexpress.com/section/express-sunday-eye/feed',
# ('Gadgets', 'https://indianexpress.com/section/technology/gadgets/'), 'http://indianexpress.com/section/world/feed',
('Tech Review', 'https://indianexpress.com/section/technology/tech-reviews/'), 'https://indianexpress.com/section/technology/feed',
# ('Techhook', 'https://indianexpress.com/section/technology/techook/'), 'https://indianexpress.com/section/entertainment/feed',
# ('Laptops', 'https://indianexpress.com/section/technology/laptops/'), 'https://indianexpress.com/feed',
# ('Mobiles & Tabs', 'https://indianexpress.com/section/technology/mobile-tabs/'),
('Science', 'https://indianexpress.com/section/technology/science/'),
('Movie Review', 'https://indianexpress.com/section/entertainment/movie-review/'),
] ]
feeds = [] # def parse_index(self):
# For each section title, fetch the article urls # section_list = [
for section in section_list: # ('Daily Briefing', 'https://indianexpress.com/section/live-news/'),
section_title = section[0] # ('Front Page', 'https://indianexpress.com/print/front-page/'),
section_url = section[1] # ('India', 'https://indianexpress.com/section/india/'),
self.log(section_title, section_url) # # ('Express Network', 'https://indianexpress.com/print/express-network/'),
soup = self.index_to_soup(section_url) # ('Delhi Confidential', 'https://indianexpress.com/section/delhi-confidential/'),
if '/world/' in section_url or '/explained/' in section_url: # ('Editorials', 'https://indianexpress.com/section/opinion/editorials/'),
articles = self.articles_from_page(soup) # ('Columns', 'https://indianexpress.com/section/opinion/columns/'),
else: # ('UPSC-CSE Key', 'https://indianexpress.com/section/upsc-current-affairs/'),
articles = self.articles_from_soup(soup) # ('Explained', 'https://indianexpress.com/section/explained/'),
if articles: # ('Business', 'https://indianexpress.com/section/business/'),
feeds.append((section_title, articles)) # # ('Political Pulse', 'https://indianexpress.com/section/political-pulse/'),
return feeds # ('Sunday Eye', 'https://indianexpress.com/section/express-sunday-eye/'),
# ('World', 'https://indianexpress.com/section/world/'),
# # ('Education', 'https://indianexpress.com/section/education/'),
# # ('Gadgets', 'https://indianexpress.com/section/technology/gadgets/'),
# ('Tech Review', 'https://indianexpress.com/section/technology/tech-reviews/'),
# # ('Techhook', 'https://indianexpress.com/section/technology/techook/'),
# # ('Laptops', 'https://indianexpress.com/section/technology/laptops/'),
# # ('Mobiles & Tabs', 'https://indianexpress.com/section/technology/mobile-tabs/'),
# ('Science', 'https://indianexpress.com/section/technology/science/'),
# ('Movie Review', 'https://indianexpress.com/section/entertainment/movie-review/'),
# ]
def articles_from_page(self, soup): # feeds = []
ans = []
for div in soup.findAll(attrs={'class': ['northeast-topbox', 'explained-section-grid']}):
for a in div.findAll('a', href=True):
if not a.find('img') and '/section/' not in a['href']:
url = a['href']
title = self.tag_to_string(a)
self.log('\t', title, '\n\t\t', url)
ans.append({'title': title, 'url': url, 'description': ''})
return ans
def articles_from_soup(self, soup): # # For each section title, fetch the article urls
ans = [] # for section in section_list:
div = soup.find('div', attrs={'class': ['nation', 'o-opin', 'myie-nation', 'opinion-more-wrapper']}) # section_title = section[0]
for art in div.findAll( # section_url = section[1]
attrs={'class': ['articles', 'o-opin-article', 'myie-articles']} # self.log(section_title, section_url)
): # soup = self.index_to_soup(section_url)
for a in art.findAll('a', href=True): # if '/world/' in section_url or '/explained/' in section_url:
if not a.find('img') and not any( # articles = self.articles_from_page(soup)
x in a['href'] for x in ['/profile/', '/agency/', '/section/'] # else:
): # articles = self.articles_from_soup(soup)
url = a['href'] # if articles:
title = self.tag_to_string(a) # feeds.append((section_title, articles))
desc = '' # return feeds
if p := (art.find('p') or art.find(attrs={'class': 'opinion-news-para'})):
desc = self.tag_to_string(p) # def articles_from_page(self, soup):
if da := art.find( # ans = []
attrs={'class': ['date', 'o-opin-date', 'opinion-date', 'my-time']} # for div in soup.findAll(attrs={'class': ['northeast-topbox', 'explained-section-grid']}):
): # for a in div.findAll('a', href=True):
date = parse_date(self.tag_to_string(da)).replace(tzinfo=None) # if not a.find('img') and '/section/' not in a['href']:
today = datetime.now() # url = a['href']
if (today - date) > timedelta(self.oldest_article): # title = self.tag_to_string(a)
continue # self.log('\t', title, '\n\t\t', url)
self.log('\t', title, '\n\t', desc, '\n\t\t', url) # ans.append({'title': title, 'url': url, 'description': ''})
ans.append({'title': title, 'url': url, 'description': desc}) # return ans
return ans
# def articles_from_soup(self, soup):
# ans = []
# div = soup.find('div', attrs={'class': ['nation', 'o-opin', 'myie-nation', 'opinion-more-wrapper']})
# for art in div.findAll(
# attrs={'class': ['articles', 'o-opin-article', 'myie-articles']}
# ):
# for a in art.findAll('a', href=True):
# if not a.find('img') and not any(
# x in a['href'] for x in ['/profile/', '/agency/', '/section/']
# ):
# url = a['href']
# title = self.tag_to_string(a)
# desc = ''
# if p := (art.find('p') or art.find(attrs={'class': 'opinion-news-para'})):
# desc = self.tag_to_string(p)
# if da := art.find(
# attrs={'class': ['date', 'o-opin-date', 'opinion-date', 'my-time']}
# ):
# date = parse_date(self.tag_to_string(da)).replace(tzinfo=None)
# today = datetime.now()
# if (today - date) > timedelta(self.oldest_article):
# continue
# self.log('\t', title, '\n\t', desc, '\n\t\t', url)
# ans.append({'title': title, 'url': url, 'description': desc})
# return ans
def get_cover_url(self): def get_cover_url(self):
soup = self.index_to_soup( soup = self.index_to_soup(
@@ -136,6 +169,10 @@ class IndianExpress(BasicNewsRecipe):
return citem['content'].replace('300', '600') return citem['content'].replace('300', '600')
def preprocess_html(self, soup): def preprocess_html(self, soup):
width = '600'
w = self.recipe_specific_options.get('res')
if w and isinstance(w, str):
width = w
if h2 := (soup.find(attrs={'itemprop': 'description'}) or soup.find(**classes('synopsis'))): if h2 := (soup.find(attrs={'itemprop': 'description'}) or soup.find(**classes('synopsis'))):
h2.name = 'p' h2.name = 'p'
h2['id'] = 'sub-d' h2['id'] = 'sub-d'
@@ -144,12 +181,12 @@ class IndianExpress(BasicNewsRecipe):
): ):
span['id'] = 'img-cap' span['id'] = 'img-cap'
for img in soup.findAll('img', attrs={'data-src': True}): for img in soup.findAll('img', attrs={'data-src': True}):
img['src'] = img['data-src'] img['src'] = img['data-src'].split('?')[0] + '?w=' + width
if span := soup.find('span', content=True, attrs={'itemprop': 'dateModified'}): # if span := soup.find('span', content=True, attrs={'itemprop': 'dateModified'}):
date = parse_date(span['content']).replace(tzinfo=None) # date = parse_date(span['content']).replace(tzinfo=None)
today = datetime.now() # today = datetime.now()
if (today - date) > timedelta(self.oldest_article): # if (today - date) > timedelta(self.oldest_article):
self.abort_article('Skipping old article') # self.abort_article('Skipping old article')
for img in soup.findAll('img', attrs={'src': True}): for img in soup.findAll('img', attrs={'src': True}):
img['src'] = img['src'].split('?')[0] + '?w=600' img['src'] = img['src'].split('?')[0] + '?w=' + width
return soup return soup