Update eenadu

unkn0w7n 2024-04-14 19:29:03 +05:30
parent 4aa11d3b3e
commit 4459698437
2 changed files with 193 additions and 222 deletions


First file: the Telangana edition recipe (class eenadu_ts). The commit drops the hard-coded cover_url, the parse_index()/articles_from_soup() scraping of www.eenadu.net section pages, and the publish-date filter in preprocess_html(); in their place the feed list is built from Google News RSS queries, each Google News link is resolved in get_obfuscated_article(), and the cover is taken from the e-paper site.

Before this commit:

import re
from datetime import date, datetime, timedelta

from calibre.utils.date import parse_date
from calibre.web.feeds.news import BasicNewsRecipe, classes


class eenadu_ts(BasicNewsRecipe):
    # ... (header lines not touched by this commit are omitted here, as in the original diff) ...
    __author__ = 'unkn0wn'
    description = 'THE LARGEST CIRCULATED TELUGU DAILY'
    language = 'te'
    use_embedded_content = False
    remove_javascript = True
    no_stylesheets = True
    remove_attributes = ['height', 'width', 'style']
    ignore_duplicate_articles = {'url', 'title'}
    masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
    cover_url = 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/08/08/GTH/5_01/d5041804_01_mr.jpg'
    encoding = 'utf-8'
    remove_empty_feeds = True
    extra_css = '.pub-t{font-size:small; font-style:italic;}'

    keep_only_tags = [
        dict(name='h1'),
        dict(**classes('pub-t')),
        classes('fullstory text-justify contlist-cont'),
        dict(name='span', attrs={'id': 'PDSAIApbreak'}),
    ]

    remove_tags = [
        dict(name='span', attrs={'style': 'float:left; margin-right:10px;'}),
        dict(
            name='p',
            attrs={
                'style':
                'font-size: 18px !important; margin: 0px; margin-top: -15px; text-align: center;flex: 1;'
            }
        ),
        dict(name='aside', attrs={'class': lambda x: x and x.startswith('thumb')}),
        dict(name='br'),
        classes('sshare-c tags andbeyond_ad fnt20 arti more2 offset-tb1 msb-list')
    ]

    def parse_index(self):
        section_list = [
            ('తెలంగాణ తాజా వార్తలు', 'telangana'),
            ('సంపాదకీయం', 'telangana/editorial'),
            ('తెలంగాణ ప్రధానాంశాలు', 'telangana/top-news'),
            ('తెలంగాణ జిల్లా వార్తలు', 'telangana/districts'),
            # ('క్రైమ్', 'crime'),
            ('పాలిటిక్స్', 'politics'),
            ('జాతీయం', 'india'),
            ('బిజినెస్', 'business'),
            ('అంతర్జాతీయం', 'world'),
            ('క్రీడలు', 'sports'),
            # ('సినిమా', 'movies'),
            # ('చదువు', 'education'),
            # ('సుఖీభవ', 'health'),
            # ('ఈ-నాడు', 'technology'),
            # ('మకరందం', 'devotional'),
            # ('ఈ తరం', 'youth'),
            # ('ఆహా', 'recipes'),
            # ('హాయ్ బుజ్జీ', 'kids-stories'),
            # ('స్థిరాస్తి', 'real-estate'),
        ]
        is_sunday = date.today().weekday() == 6
        if is_sunday:
            section_list.append(('సండే మ్యాగజైన్', 'sunday-magazine'))
        feeds = []

        # For each section title, fetch the article urls
        for section in section_list:
            section_title = section[0]
            section_url = 'https://www.eenadu.net/' + section[1]
            self.log(section_title, section_url)
            soup = self.index_to_soup(section_url)
            articles = self.articles_from_soup(soup)
            if articles:
                feeds.append((section_title, articles))
        return feeds

    def articles_from_soup(self, soup):
        ans = []
        for link in soup.findAll(
            attrs={
                'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']
            }
        ):
            for a in link.findAll('a', attrs={'href': True}):
                url = a['href']
                if url.startswith('https') is False:
                    url = 'https://www.eenadu.net/' + url
                try:
                    desc = self.tag_to_string(a.find('div')).strip()
                except Exception:
                    desc = ''
                for h3 in a.findAll('h3'):
                    title = self.tag_to_string(h3).strip()
                    sub = re.escape(title)
                    desc = re.sub(sub, '', desc).strip()
                    if not title or not url:
                        continue
                    self.log('\t', title, '\n\t', desc, '\n\t\t', url)
                    ans.append({'title': title, 'url': url, 'description': desc})
        return ans

    def preprocess_html(self, soup):
        div = soup.find('div', **classes('pub-t'))
        if div:
            date = parse_date(
                self.tag_to_string(div).strip().replace('Published : ', '').replace(
                    'Updated : ', ''
                ).replace(' IST', ':00.000001')
            ).replace(tzinfo=None)
            today = datetime.now()
            if (today - date) > timedelta(1.15):
                self.abort_article('Skipping old article')
        else:
            self.abort_article('not an article')
        for img in soup.findAll('img', attrs={'data-src': True}):
            img['src'] = img['data-src']
        return soup

After this commit:

from urllib.parse import quote

from calibre.web.feeds.news import BasicNewsRecipe, classes


class eenadu_ts(BasicNewsRecipe):
    # ... (header lines not touched by this commit are omitted here, as in the original diff) ...
    __author__ = 'unkn0wn'
    description = 'THE LARGEST CIRCULATED TELUGU DAILY'
    language = 'te'
    encoding = 'utf-8'
    no_stylesheets = True
    remove_javascript = True
    masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
    remove_attributes = ['style', 'height', 'width']
    ignore_duplicate_articles = {'url', 'title'}
    reverse_article_order = True
    remove_empty_feeds = True
    simultaneous_downloads = 1
    art_url = ''

    extra_css = '''
        img {display:block; margin:0 auto;}
        blockquote, em {color:#202020;}
        .pub-t{font-size:small; font-style:italic;}
    '''

    keep_only_tags = [classes('bookWrapper fullstory')]
    remove_tags = [classes('ext-link offset-tb1 sshare-c')]

    articles_are_obfuscated = True

    def get_obfuscated_article(self, url):
        br = self.get_browser()
        soup = self.index_to_soup(url)
        link = soup.a['href']
        skip_sections = [  # add sections you want to skip
            '/video/', '/videos/', '/multimedia/', 'marathi', 'hindi', 'bangla'
        ]
        if any(x in link for x in skip_sections):
            self.abort_article('skipping video links')
        self.log('Found ', link)
        self.art_url = link
        html = br.open(link).read()
        return ({'data': html, 'url': link})

    resolve_internal_links = True
    remove_empty_feeds = True

    def get_cover_url(self):
        import json
        from datetime import date
        today = quote(date.today().strftime('%d/%m/%Y'), safe='')
        raw = self.index_to_soup(
            'https://epaper.eenadu.net/Home/GetAllpages?editionid=1&editiondate=' + today, raw=True
        )
        for cov in json.loads(raw):
            if cov['NewsProPageTitle'].lower().startswith('front'):
                return cov['HighResolution']

    feeds = []

    when = '27'  # hours
    index = 'https://www.eenadu.net'
    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=te-IN&gl=IN&ceid=IN:te'

    news = index + '/telugu-news/'
    news_list = [
        ('తెలంగాణ ప్రధానాంశాలు', 'ts-top-news'),
        ('సంపాదకీయం', 'editorial'),
        ('వ్యాఖ్యానం', 'vyakyanam'),
        ('హైదరాబాద్ జిల్లా వార్తలు', 'districts/Hyderabad'),
        ('క్రైమ్', 'crime'),
        ('పాలిటిక్స్', 'politics'),
        ('జాతీయం', 'india'),
        ('బిజినెస్', 'business'),
        ('అంతర్జాతీయం', 'world'),
        ('క్రీడలు', 'sports'),
        ('సినిమా', 'movies'),
        ('వసుంధర', 'women'),
        ('ఈ-నాడు', 'technology'),
        ('వెబ్ ప్రత్యేకం', 'explained')
    ]
    for n in news_list:
        news_index = news + n[1] + '/'
        feeds.append((n[0], a.format(when, quote(news_index, safe=''))))
    feeds.append(('Other News', a.format(when, quote(news, safe=''))))

    art = index + '/telugu-article/'
    art_list = [
        ('చదువు', 'education'),
        ('సుఖీభవ', 'health'),
        ('ఆహా', 'recipes'),
        ('హాయ్ బుజ్జీ', 'kids-stories'),
        ('మకరందం', 'devotional'),
        ('దేవతార్చన', 'temples'),
        ('స్థిరాస్తి', 'real-estate'),
        ('కథామృతం', 'kathalu'),
        ('సండే మ్యాగజైన్', 'sunday-magazine')
    ]
    for x in art_list:
        art_index = art + x[1] + '/'
        feeds.append((x[0], a.format(when, quote(art_index, safe=''))))
    feeds.append(('Other Articles', a.format(when, quote(art, safe=''))))

    feeds.append(('ఇతరులు', a.format(when, quote(index, safe=''))))
    feeds.append(('ప్రతిభ', a.format(when, 'https://pratibha.eenadu.net/')))

    def populate_article_metadata(self, article, soup, first):
        article.url = self.art_url
        article.title = article.title.replace(' - Eenadu', '')
        desc = soup.find(attrs={'class': 'srtdes'})
        if desc:
            article.summary = self.tag_to_string(desc)
            article.text_summary = article.summary

    def preprocess_raw_html(self, raw, *a):
        import re
        if '<!--Top Full Story Start -->' in raw:
            body = re.search(r'<!--Top Full Story Start -->([^~]+?)<!--Tags Start -->', raw)
            return '<html><body><div>' + body.group(1) + '</div></body></html>'
        return raw
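
For readers who want to sanity-check the Google News feeds the new recipe builds, the URL pattern can be reproduced outside calibre with nothing but the standard library. A minimal sketch, using the 'ts-top-news' slug from news_list above as the example (the other news_list and art_list slugs work the same way):

# Sketch of how the recipe composes one Google News RSS query: the section URL
# is percent-encoded into an allinurl: search limited to items from the last
# `when` hours, exactly as in `a.format(when, quote(news_index, safe=''))` above.
from urllib.parse import quote

when = '27'  # look-back window in hours, as in the recipe
a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=te-IN&gl=IN&ceid=IN:te'
news_index = 'https://www.eenadu.net/telugu-news/ts-top-news/'
print(a.format(when, quote(news_index, safe='')))
# prints:
# https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fwww.eenadu.net%2Ftelugu-news%2Fts-top-news%2F&hl=te-IN&gl=IN&ceid=IN:te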


Second file: the Andhra Pradesh edition recipe (class eenadu_ap). It receives the same rewrite as the Telangana recipe above. The removed code matched the old eenadu_ts code except for its AP-specific pieces. Its cover_url pointed at 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/08/03/CAN/5_01/bfff5654_01_mr.jpg', its section_list in parse_index() began with the Andhra Pradesh sections instead of the Telangana ones:

        section_list = [
            ('ఆంధ్రప్రదేశ్ తాజా వార్తలు', 'andhra-pradesh'),
            ('సంపాదకీయం', 'andhra-pradesh/editorial'),
            ('ఆంధ్రప్రదేశ్ ప్రధానాంశాలు', 'andhra-pradesh/top-news'),
            ('ఆంధ్రప్రదేశ్ జిల్లా వార్తలు', 'andhra-pradesh/districts'),
            # ('క్రైమ్', 'crime'),
            # remaining entries identical to the old eenadu_ts section_list above
        ]

and it carried an extra get_cover_url() that fell back to kiosko.net for the front-page image:

    def get_cover_url(self):
        from datetime import date
        cover = 'https://img.kiosko.net/' + str(
            date.today().year
        ) + '/' + date.today().strftime('%m') + '/' + date.today(
        ).strftime('%d') + '/in/eenadu.750.jpg'
        br = BasicNewsRecipe.get_browser(self, verify_ssl_certificates=False)
        try:
            br.open(cover)
        except:
            index = 'https://es.kiosko.net/in/np/eenadu.html'
            soup = self.index_to_soup(index)
            for image in soup.findAll('img', src=True):
                if image['src'].endswith('750.jpg'):
                    return 'https:' + image['src']
            self.log("\nCover unavailable")
            cover = None
        return cover

After this commit:

from urllib.parse import quote

from calibre.web.feeds.news import BasicNewsRecipe, classes


class eenadu_ap(BasicNewsRecipe):
    # ... (header lines not touched by this commit are omitted here, as in the original diff) ...
    __author__ = 'unkn0wn'
    description = 'THE LARGEST CIRCULATED TELUGU DAILY'
    language = 'te'
    encoding = 'utf-8'
    no_stylesheets = True
    remove_javascript = True
    masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
    remove_attributes = ['style', 'height', 'width']
    ignore_duplicate_articles = {'url', 'title'}
    reverse_article_order = True
    remove_empty_feeds = True
    simultaneous_downloads = 1
    art_url = ''

    extra_css = '''
        img {display:block; margin:0 auto;}
        blockquote, em {color:#202020;}
        .pub-t{font-size:small; font-style:italic;}
    '''

    keep_only_tags = [classes('bookWrapper fullstory')]
    remove_tags = [classes('ext-link offset-tb1 sshare-c')]

    articles_are_obfuscated = True

    def get_obfuscated_article(self, url):
        br = self.get_browser()
        soup = self.index_to_soup(url)
        link = soup.a['href']
        skip_sections = [  # add sections you want to skip
            '/video/', '/videos/', '/multimedia/', 'marathi', 'hindi', 'bangla'
        ]
        if any(x in link for x in skip_sections):
            self.abort_article('skipping video links')
        self.log('Found ', link)
        self.art_url = link
        html = br.open(link).read()
        return ({'data': html, 'url': link})

    resolve_internal_links = True
    remove_empty_feeds = True

    def get_cover_url(self):
        import json
        from datetime import date
        today = quote(date.today().strftime('%d/%m/%Y'), safe='')
        raw = self.index_to_soup(
            'https://epaper.eenadu.net/Home/GetAllpages?editionid=2&editiondate=' + today, raw=True
        )
        for cov in json.loads(raw):
            if cov['NewsProPageTitle'].lower().startswith('front'):
                return cov['HighResolution']

    feeds = []

    when = '27'  # hours
    index = 'https://www.eenadu.net'
    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=te-IN&gl=IN&ceid=IN:te'

    news = index + '/telugu-news/'
    news_list = [
        ('ఆంధ్రప్రదేశ్ ప్రధానాంశాలు', 'ap-top-news'),
        ('సంపాదకీయం', 'editorial'),
        ('వ్యాఖ్యానం', 'vyakyanam'),
        ('విశాఖపట్నం జిల్లా వార్తలు', 'districts/Visakhapatnam'),
        ('క్రైమ్', 'crime'),
        ('పాలిటిక్స్', 'politics'),
        ('జాతీయం', 'india'),
        ('బిజినెస్', 'business'),
        ('అంతర్జాతీయం', 'world'),
        ('క్రీడలు', 'sports'),
        ('సినిమా', 'movies'),
        ('వసుంధర', 'women'),
        ('ఈ-నాడు', 'technology'),
        ('వెబ్ ప్రత్యేకం', 'explained')
    ]
    for n in news_list:
        news_index = news + n[1] + '/'
        feeds.append((n[0], a.format(when, quote(news_index, safe=''))))
    feeds.append(('Other News', a.format(when, quote(news, safe=''))))

    art = index + '/telugu-article/'
    art_list = [
        ('చదువు', 'education'),
        ('సుఖీభవ', 'health'),
        ('ఆహా', 'recipes'),
        ('హాయ్ బుజ్జీ', 'kids-stories'),
        ('మకరందం', 'devotional'),
        ('దేవతార్చన', 'temples'),
        ('స్థిరాస్తి', 'real-estate'),
        ('కథామృతం', 'kathalu'),
        ('సండే మ్యాగజైన్', 'sunday-magazine')
    ]
    for x in art_list:
        art_index = art + x[1] + '/'
        feeds.append((x[0], a.format(when, quote(art_index, safe=''))))
    feeds.append(('Other Articles', a.format(when, quote(art, safe=''))))

    feeds.append(('ఇతరులు', a.format(when, quote(index, safe=''))))
    feeds.append(('ప్రతిభ', a.format(when, 'https://pratibha.eenadu.net/')))

    def populate_article_metadata(self, article, soup, first):
        article.url = self.art_url
        article.title = article.title.replace(' - Eenadu', '')
        desc = soup.find(attrs={'class': 'srtdes'})
        if desc:
            article.summary = self.tag_to_string(desc)
            article.text_summary = article.summary

    def preprocess_raw_html(self, raw, *a):
        import re
        if '<!--Top Full Story Start -->' in raw:
            body = re.search(r'<!--Top Full Story Start -->([^~]+?)<!--Tags Start -->', raw)
            return '<html><body><div>' + body.group(1) + '</div></body></html>'
        return raw
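
The e-paper cover lookup shared by both updated recipes can also be exercised on its own. A rough standalone sketch, assuming the GetAllpages endpoint answers a plain urllib request without extra headers or cookies (inside calibre the recipes go through index_to_soup and the recipe browser instead); editionid=1 is the Telangana edition and editionid=2 the Andhra Pradesh one:

# Standalone sketch of the get_cover_url() logic above, using the same
# endpoint and the same JSON keys ('NewsProPageTitle', 'HighResolution').
import json
from datetime import date
from urllib.parse import quote
from urllib.request import urlopen

today = quote(date.today().strftime('%d/%m/%Y'), safe='')
url = 'https://epaper.eenadu.net/Home/GetAllpages?editionid=2&editiondate=' + today
pages = json.loads(urlopen(url).read())
for cov in pages:
    if cov['NewsProPageTitle'].lower().startswith('front'):
        print(cov['HighResolution'])  # URL of the front-page image
        break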