from datetime import date

from calibre.web.feeds.news import BasicNewsRecipe, classes


class eenadu(BasicNewsRecipe):
    title = 'ఈనాడు'
    __author__ = 'unkn0wn'
    description = 'THE LARGEST CIRCULATED TELUGU DAILY'
    language = 'te'
    use_embedded_content = False
    remove_javascript = True
    no_stylesheets = True
    remove_attributes = ['height', 'width', 'style']
    ignore_duplicate_articles = {'title', 'url'}
    masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
    cover_url = 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/06/07/TEL/5_01/9de49f18_01_mr.jpg'
    encoding = 'utf-8'

    keep_only_tags = [
        dict(name='h1'),
        classes('eng-body grey pub-t text-justify contlist-cont'),
        dict(name='span', attrs={'id': 'PDSAIApbreak'}),
    ]

    remove_tags = [classes('sshare-c')]
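    # classes() (imported above from calibre.web.feeds.news) builds a tag
    # matcher that accepts an element carrying any of the space-separated
    # CSS class names, so the list above keeps the headline and article-body
    # containers while remove_tags drops what appears to be a social-share
    # widget.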

    def articles_from_soup(self, soup):
        ans = []
        for link in soup.findAll(attrs={'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']}):
            for a in link.findAll('a', attrs={'href': True}):
                url = a['href']
                # Resolve relative links against the site root
                if not url.startswith('https'):
                    url = 'https://www.eenadu.net/' + url
                for h3 in a.findAll('h3'):
                    title = self.tag_to_string(h3)
                    if not url or not title:
                        continue
                    self.log('\t', title)
                    self.log('\t\t', url)
                    ans.append({'title': title, 'url': url})
        return ans
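    # Each dict appended above is the minimal article entry calibre accepts;
    # optional 'description' and 'date' keys could also be filled in if the
    # section pages exposed them.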

    def parse_index(self):
        soup = self.index_to_soup('https://www.eenadu.net/')
        nav_div = soup.find(id='navbar')
        # Start with the editorial (సంపాదకీయం) section, then pull the rest
        # of the sections from the top nav bar
        section_list = [
            ('సంపాదకీయం', 'https://www.eenadu.net/telangana/editorial'),
        ]

        # Find all the section titles that are acceptable
        for x in nav_div.findAll(['a']):
            if self.is_accepted_entry(x):
                section_list.append((self.tag_to_string(x), x['href']))

        # For each section title, fetch the article urls
        feeds = []
        for section_title, section_url in section_list:
            self.log(section_title, section_url)
            soup = self.index_to_soup(section_url)
            articles = self.articles_from_soup(soup)
            if articles:
                feeds.append((section_title, articles))
        return feeds
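    # The value returned above follows the structure BasicNewsRecipe
    # expects from parse_index(): a list of
    # (section_title, [{'title': ..., 'url': ...}, ...]) tuples.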

    def is_accepted_entry(self, entry):
        # Sections in the top nav bar that we omit,
        # e.g. https://www.eenadu.net/nri
        omit_list = [
            'net/',
            'javascript:void(0)',
            '#',
            'sports',
            'movies',
            'women',
            'technology',
            'business',
            'stories.eenadu.net',
            'calendar',
            'devotional',
            'youth',
            'recipes',
            'real-estate',
            'temples',
            'kathalu',
            'viral-videos',
            'Nri',
            'videos',
            'explained',
            'agriculture',
            'crime',
            'health',
            'photos',
            'kids-stories',
            'education',
        ]
        # The Sunday magazine section is kept only on Sundays; omit it on
        # every other day of the week.
        if date.today().weekday() != 6:
            omit_list.append('sunday-magazine')

        is_accepted = True
        for omit_entry in omit_list:
            if entry['href'].endswith(omit_entry):
                is_accepted = False
                break
        return is_accepted
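
# A quick way to test this recipe locally (assuming calibre is installed) is
# calibre's standard recipe test invocation:
#
#   ebook-convert eenadu.recipe eenadu.epub --test -vv
#
# --test restricts the run to two feeds with two articles each, which is
# enough to exercise parse_index() and articles_from_soup().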