Update indian_express.recipe

Switched to RSS feeds.
unkn0w7n 2025-08-02 19:08:13 +05:30
parent ab89056f7e
commit da640add79


@@ -11,7 +11,6 @@ class IndianExpress(BasicNewsRecipe):
     language = 'en_IN'
     __author__ = 'unkn0wn'
     oldest_article = 1.15  # days
-    max_articles_per_feed = 25
     encoding = 'utf-8'
     masthead_url = 'https://indianexpress.com/wp-content/themes/indianexpress/images/indian-express-logo-n.svg'
     no_stylesheets = True
@@ -49,84 +48,118 @@ class IndianExpress(BasicNewsRecipe):
         )
     ]

-    def parse_index(self):
-
-        section_list = [
-            ('Daily Briefing', 'https://indianexpress.com/section/live-news/'),
-            ('Front Page', 'https://indianexpress.com/print/front-page/'),
-            ('India', 'https://indianexpress.com/section/india/'),
-            # ('Express Network', 'https://indianexpress.com/print/express-network/'),
-            ('Delhi Confidential', 'https://indianexpress.com/section/delhi-confidential/'),
-            ('Editorials', 'https://indianexpress.com/section/opinion/editorials/'),
-            ('Columns', 'https://indianexpress.com/section/opinion/columns/'),
-            ('UPSC-CSE Key', 'https://indianexpress.com/section/upsc-current-affairs/'),
-            ('Explained', 'https://indianexpress.com/section/explained/'),
-            ('Business', 'https://indianexpress.com/section/business/'),
-            # ('Political Pulse', 'https://indianexpress.com/section/political-pulse/'),
-            ('Sunday Eye', 'https://indianexpress.com/section/express-sunday-eye/'),
-            ('World', 'https://indianexpress.com/section/world/'),
-            # ('Education', 'https://indianexpress.com/section/education/'),
-            # ('Gadgets', 'https://indianexpress.com/section/technology/gadgets/'),
-            ('Tech Review', 'https://indianexpress.com/section/technology/tech-reviews/'),
-            # ('Techhook', 'https://indianexpress.com/section/technology/techook/'),
-            # ('Laptops', 'https://indianexpress.com/section/technology/laptops/'),
-            # ('Mobiles & Tabs', 'https://indianexpress.com/section/technology/mobile-tabs/'),
-            ('Science', 'https://indianexpress.com/section/technology/science/'),
-            ('Movie Review', 'https://indianexpress.com/section/entertainment/movie-review/'),
-        ]
-
-        feeds = []
-
-        # For each section title, fetch the article urls
-        for section in section_list:
-            section_title = section[0]
-            section_url = section[1]
-            self.log(section_title, section_url)
-            soup = self.index_to_soup(section_url)
-            if '/world/' in section_url or '/explained/' in section_url:
-                articles = self.articles_from_page(soup)
-            else:
-                articles = self.articles_from_soup(soup)
-            if articles:
-                feeds.append((section_title, articles))
-        return feeds
-
-    def articles_from_page(self, soup):
-        ans = []
-        for div in soup.findAll(attrs={'class': ['northeast-topbox', 'explained-section-grid']}):
-            for a in div.findAll('a', href=True):
-                if not a.find('img') and '/section/' not in a['href']:
-                    url = a['href']
-                    title = self.tag_to_string(a)
-                    self.log('\t', title, '\n\t\t', url)
-                    ans.append({'title': title, 'url': url, 'description': ''})
-        return ans
-
-    def articles_from_soup(self, soup):
-        ans = []
-        div = soup.find('div', attrs={'class': ['nation', 'o-opin', 'myie-nation', 'opinion-more-wrapper']})
-        for art in div.findAll(
-            attrs={'class': ['articles', 'o-opin-article', 'myie-articles']}
-        ):
-            for a in art.findAll('a', href=True):
-                if not a.find('img') and not any(
-                    x in a['href'] for x in ['/profile/', '/agency/', '/section/']
-                ):
-                    url = a['href']
-                    title = self.tag_to_string(a)
-                    desc = ''
-                    if p := (art.find('p') or art.find(attrs={'class': 'opinion-news-para'})):
-                        desc = self.tag_to_string(p)
-                    if da := art.find(
-                        attrs={'class': ['date', 'o-opin-date', 'opinion-date', 'my-time']}
-                    ):
-                        date = parse_date(self.tag_to_string(da)).replace(tzinfo=None)
-                        today = datetime.now()
-                        if (today - date) > timedelta(self.oldest_article):
-                            continue
-                    self.log('\t', title, '\n\t', desc, '\n\t\t', url)
-                    ans.append({'title': title, 'url': url, 'description': desc})
-        return ans
+    recipe_specific_options = {
+        'days': {
+            'short': 'Oldest article to download from this news source. In days ',
+            'long': 'For example, 0.5, gives you articles from the past 12 hours',
+            'default': str(oldest_article),
+        },
+        'res': {
+            'short': 'For hi-res images, select a resolution from the\nfollowing options: 400, 800, 1200, 1600',
+            'long': 'This is useful for non e-ink devices.',
+            'default': '600',
+        },
+    }
+
+    def __init__(self, *args, **kwargs):
+        BasicNewsRecipe.__init__(self, *args, **kwargs)
+        d = self.recipe_specific_options.get('days')
+        if d and isinstance(d, str):
+            self.oldest_article = float(d)
+
+    feeds = [
+        'https://indianexpress.com/section/opinion/feed',
+        'https://indianexpress.com/section/delhi-confidential/feed',
+        'https://indianexpress.com/section/india/feed',
+        'https://indianexpress.com/section/political-pulse/feed',
+        'https://indianexpress.com/section/explained/feed',
+        'https://indianexpress.com/section/business/feed/',
+        'https://indianexpress.com/section/upsc-current-affairs/feed',
+        'https://indianexpress.com/section/express-sunday-eye/feed',
+        'http://indianexpress.com/section/world/feed',
+        'https://indianexpress.com/section/technology/feed',
+        'https://indianexpress.com/section/entertainment/feed',
+        'https://indianexpress.com/feed',
+    ]
+
+    # def parse_index(self):
+
+    #     section_list = [
+    #         ('Daily Briefing', 'https://indianexpress.com/section/live-news/'),
+    #         ('Front Page', 'https://indianexpress.com/print/front-page/'),
+    #         ('India', 'https://indianexpress.com/section/india/'),
+    #         # ('Express Network', 'https://indianexpress.com/print/express-network/'),
+    #         ('Delhi Confidential', 'https://indianexpress.com/section/delhi-confidential/'),
+    #         ('Editorials', 'https://indianexpress.com/section/opinion/editorials/'),
+    #         ('Columns', 'https://indianexpress.com/section/opinion/columns/'),
+    #         ('UPSC-CSE Key', 'https://indianexpress.com/section/upsc-current-affairs/'),
+    #         ('Explained', 'https://indianexpress.com/section/explained/'),
+    #         ('Business', 'https://indianexpress.com/section/business/'),
+    #         # ('Political Pulse', 'https://indianexpress.com/section/political-pulse/'),
+    #         ('Sunday Eye', 'https://indianexpress.com/section/express-sunday-eye/'),
+    #         ('World', 'https://indianexpress.com/section/world/'),
+    #         # ('Education', 'https://indianexpress.com/section/education/'),
+    #         # ('Gadgets', 'https://indianexpress.com/section/technology/gadgets/'),
+    #         ('Tech Review', 'https://indianexpress.com/section/technology/tech-reviews/'),
+    #         # ('Techhook', 'https://indianexpress.com/section/technology/techook/'),
+    #         # ('Laptops', 'https://indianexpress.com/section/technology/laptops/'),
+    #         # ('Mobiles & Tabs', 'https://indianexpress.com/section/technology/mobile-tabs/'),
+    #         ('Science', 'https://indianexpress.com/section/technology/science/'),
+    #         ('Movie Review', 'https://indianexpress.com/section/entertainment/movie-review/'),
+    #     ]
+
+    #     feeds = []
+
+    #     # For each section title, fetch the article urls
+    #     for section in section_list:
+    #         section_title = section[0]
+    #         section_url = section[1]
+    #         self.log(section_title, section_url)
+    #         soup = self.index_to_soup(section_url)
+    #         if '/world/' in section_url or '/explained/' in section_url:
+    #             articles = self.articles_from_page(soup)
+    #         else:
+    #             articles = self.articles_from_soup(soup)
+    #         if articles:
+    #             feeds.append((section_title, articles))
+    #     return feeds
+
+    # def articles_from_page(self, soup):
+    #     ans = []
+    #     for div in soup.findAll(attrs={'class': ['northeast-topbox', 'explained-section-grid']}):
+    #         for a in div.findAll('a', href=True):
+    #             if not a.find('img') and '/section/' not in a['href']:
+    #                 url = a['href']
+    #                 title = self.tag_to_string(a)
+    #                 self.log('\t', title, '\n\t\t', url)
+    #                 ans.append({'title': title, 'url': url, 'description': ''})
+    #     return ans
+
+    # def articles_from_soup(self, soup):
+    #     ans = []
+    #     div = soup.find('div', attrs={'class': ['nation', 'o-opin', 'myie-nation', 'opinion-more-wrapper']})
+    #     for art in div.findAll(
+    #         attrs={'class': ['articles', 'o-opin-article', 'myie-articles']}
+    #     ):
+    #         for a in art.findAll('a', href=True):
+    #             if not a.find('img') and not any(
+    #                 x in a['href'] for x in ['/profile/', '/agency/', '/section/']
+    #             ):
+    #                 url = a['href']
+    #                 title = self.tag_to_string(a)
+    #                 desc = ''
+    #                 if p := (art.find('p') or art.find(attrs={'class': 'opinion-news-para'})):
+    #                     desc = self.tag_to_string(p)
+    #                 if da := art.find(
+    #                     attrs={'class': ['date', 'o-opin-date', 'opinion-date', 'my-time']}
+    #                 ):
+    #                     date = parse_date(self.tag_to_string(da)).replace(tzinfo=None)
+    #                     today = datetime.now()
+    #                     if (today - date) > timedelta(self.oldest_article):
+    #                         continue
+    #                 self.log('\t', title, '\n\t', desc, '\n\t\t', url)
+    #                 ans.append({'title': title, 'url': url, 'description': desc})
+    #     return ans

     def get_cover_url(self):
         soup = self.index_to_soup(
@@ -136,6 +169,10 @@ class IndianExpress(BasicNewsRecipe):
         return citem['content'].replace('300', '600')

     def preprocess_html(self, soup):
+        width = '600'
+        w = self.recipe_specific_options.get('res')
+        if w and isinstance(w, str):
+            width = w
         if h2 := (soup.find(attrs={'itemprop': 'description'}) or soup.find(**classes('synopsis'))):
             h2.name = 'p'
             h2['id'] = 'sub-d'
@@ -144,12 +181,12 @@ class IndianExpress(BasicNewsRecipe):
         ):
             span['id'] = 'img-cap'
         for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src']
-        if span := soup.find('span', content=True, attrs={'itemprop': 'dateModified'}):
-            date = parse_date(span['content']).replace(tzinfo=None)
-            today = datetime.now()
-            if (today - date) > timedelta(self.oldest_article):
-                self.abort_article('Skipping old article')
+            img['src'] = img['data-src'].split('?')[0] + '?w=' + width
+        # if span := soup.find('span', content=True, attrs={'itemprop': 'dateModified'}):
+        #     date = parse_date(span['content']).replace(tzinfo=None)
+        #     today = datetime.now()
+        #     if (today - date) > timedelta(self.oldest_article):
+        #         self.abort_article('Skipping old article')
         for img in soup.findAll('img', attrs={'src': True}):
-            img['src'] = img['src'].split('?')[0] + '?w=600'
+            img['src'] = img['src'].split('?')[0] + '?w=' + width
         return soup
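
For reference, a minimal standalone sketch of the image-URL rewrite this commit adds to preprocess_html. The helper name resize_image_url is hypothetical (it does not exist in the recipe); it only mirrors the diff's split('?')[0] + '?w=' + width logic, where width comes from the new 'res' recipe-specific option instead of the previously hard-coded '?w=600'.

# Hypothetical helper, not part of the recipe: strip any existing query
# string from an image URL, then request a specific width via ?w=.
def resize_image_url(src: str, width: str = '600') -> str:
    return src.split('?')[0] + '?w=' + width

print(resize_image_url('https://images.indianexpress.com/photo.jpg?w=300&h=200'))
# https://images.indianexpress.com/photo.jpg?w=600
print(resize_image_url('https://images.indianexpress.com/photo.jpg', '1200'))
# https://images.indianexpress.com/photo.jpg?w=1200

With 'res' set to one of 400, 800, 1200 or 1600, every img src (including lazy-loaded data-src images) is rewritten this way.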