mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

commit 62c66e9e2a (parent 917f9396f4): Update Eenadu
Modified recipe (class eenadu -> eenadu_ts):

@@ -1,11 +1,11 @@
 from calibre.web.feeds.news import BasicNewsRecipe, classes
-
 from datetime import date, datetime, timedelta
 from calibre.utils.date import parse_date
+import re
 
 
-class eenadu(BasicNewsRecipe):
-    title = 'ఈనాడు'
+class eenadu_ts(BasicNewsRecipe):
+    title = 'ఈనాడు - తెలంగాణ'
     __author__ = 'unkn0wn'
     description = 'THE LARGEST CIRCULATED TELUGU DAILY'
     language = 'te'
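A note on the new import re in this hunk: further down the diff, articles_from_soup() strips the headline back out of the scraped card description with re.escape() and re.sub(). A minimal standalone sketch of that pattern (the sample strings here are made up):

    import re

    # The card's <div> text usually repeats the <h3> headline, so the
    # recipe deletes the headline from the description before logging it.
    title = 'Sample headline'
    desc = 'Sample headline Some summary text for the article.'
    desc = re.sub(re.escape(title), '', desc).strip()
    print(desc)  # -> 'Some summary text for the article.'

re.escape() matters because headlines can contain regex metacharacters such as '?' or '('.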
@@ -13,66 +13,65 @@ class eenadu(BasicNewsRecipe):
     remove_javascript = True
     no_stylesheets = True
     remove_attributes = ['height', 'width', 'style']
-    ignore_duplicate_articles = {"title", "url"}
+    ignore_duplicate_articles = {'url', 'title'}
     masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
-    cover_url = 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/06/07/TEL/5_01/9de49f18_01_mr.jpg'
+    cover_url = 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/08/08/GTH/5_01/d5041804_01_mr.jpg'
     encoding = 'utf-8'
+    remove_empty_feeds = True
+    extra_css = '.pub-t{font-size:small; font-style:italic;}'
 
     keep_only_tags = [
         dict(name='h1'),
-        classes('eng-body grey pub-t text-justify contlist-cont'),
+        dict(**classes('pub-t')),
+        classes('fullstory text-justify contlist-cont'),
         dict(name='span', attrs={'id': 'PDSAIApbreak'}),
     ]
 
-    remove_tags = [classes('sshare-c')]
-
-    def articles_from_soup(self, soup):
-        ans = []
-        for link in soup.findAll(attrs={'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']}):
-            for a in link.findAll('a', attrs={'href': True}):
-                url = a['href']
-                if url.startswith('https') is False:
-                    url = 'https://www.eenadu.net/' + url
-                else:
-                    url = url
-                for h3 in a.findAll('h3'):
-                    title = self.tag_to_string(h3)
-                    if not url or not title:
-                        continue
-                    self.log('\t', title)
-                    self.log('\t\t', url)
-                    ans.append({
-                        'title': title,
-                        'url': url})
-        return ans
+    remove_tags = [
+        dict(name='span', attrs={'style': 'float:left; margin-right:10px;'}),
+        dict(
+            name='p',
+            attrs={
+                'style':
+                'font-size: 18px !important; margin: 0px; margin-top: -15px; text-align: center;flex: 1;'
+            }
+        ),
+        dict(name='aside', attrs={'class': lambda x: x and x.startswith('thumb')}),
+        dict(name='br'),
+        classes('sshare-c tags andbeyond_ad fnt20 arti more2 offset-tb1 msb-list')
+    ]
 
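For readers unfamiliar with the classes() helper used in keep_only_tags and remove_tags above: it comes from calibre.web.feeds.news and expands a space-separated string into a BeautifulSoup attrs query that matches an element carrying any of the named classes. A sketch from memory, not the authoritative source:

    # classes('fullstory text-justify contlist-cont') builds a matcher like:
    def classes(cls):
        q = frozenset(cls.split(' '))
        return dict(attrs={
            'class': lambda x: x and frozenset(x.split()).intersection(q)})

dict(**classes('pub-t')) simply splats that same dict into a plain dict() rule so it can sit alongside the other dict(...) entries. The hunk continues below with parse_index().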
     def parse_index(self):
-        soup = self.index_to_soup('https://www.eenadu.net/')
-        nav_div = soup.find(id='navbar')
         section_list = [
-            ('సంపాదకీయం', 'https://www.eenadu.net/telangana/editorial'),
+            ('తెలంగాణ తాజా వార్తలు', 'telangana'),
+            ('సంపాదకీయం', 'telangana/editorial'),
+            ('తెలంగాణ ప్రధానాంశాలు', 'telangana/top-news'),
+            ('తెలంగాణ జిల్లా వార్తలు', 'telangana/districts'),
+            # ('క్రైమ్', 'crime'),
+            ('పాలిటిక్స్', 'politics'),
+            ('జాతీయం', 'india'),
+            ('బిజినెస్', 'business'),
+            ('అంతర్జాతీయం', 'world'),
+            ('క్రీడలు', 'sports'),
+            # ('సినిమా', 'movies'),
+            # ('చదువు', 'education'),
+            # ('సుఖీభవ', 'health'),
+            # ('ఈ-నాడు', 'technology'),
+            # ('మకరందం', 'devotional'),
+            # ('ఈ తరం', 'youth'),
+            # ('ఆహా', 'recipes'),
+            # ('హాయ్ బుజ్జీ', 'kids-stories'),
+            # ('స్థిరాస్తి', 'real-estate'),
         ]
-        # Finding all the section titles that are acceptable
-        for x in nav_div.findAll(['a']):
-            if self.is_accepted_entry(x):
-                sec = self.tag_to_string(x)
-                link = x['href']
-                if link.endswith('telangana'):
-                    sec = 'తెలంగాణ'
-                if link.endswith('andhra-pradesh'):
-                    sec = 'ఆంధ్రప్రదేశ్'
-                if link.endswith('andhra-pradesh/districts'):
-                    sec = 'ఆంధ్రప్రదేశ్.. ఆసక్తికర జిల్లా వార్తలు'
-                if link.endswith('telangana/districts'):
-                    sec = 'తెలంగాణ.. ఆసక్తికర జిల్లా వార్తలు'
-                section_list.append((sec, link))
+        is_sunday = date.today().weekday() == 6
+        if is_sunday:
+            section_list.append(('సండే మ్యాగజైన్', 'sunday-magazine'))
         feeds = []
 
         # For each section title, fetch the article urls
         for section in section_list:
             section_title = section[0]
-            section_url = section[1]
+            section_url = 'https://www.eenadu.net/' + section[1]
             self.log(section_title, section_url)
             soup = self.index_to_soup(section_url)
             articles = self.articles_from_soup(soup)
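The parse_index() rewrite above replaces the old nav-bar scraping (and its long omit_list filter, deleted in the next hunk) with a hard-coded list of relative slugs, joined to the site root only when fetching. A standalone sketch of the resulting flow, with English placeholders standing in for the Telugu section names:

    from datetime import date

    section_list = [
        ('Editorial', 'telangana/editorial'),
        ('Top news', 'telangana/top-news'),
    ]
    # The Sunday magazine section only exists on Sundays (weekday() == 6).
    if date.today().weekday() == 6:
        section_list.append(('Sunday magazine', 'sunday-magazine'))

    for title, slug in section_list:
        url = 'https://www.eenadu.net/' + slug
        print(title, url)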
@@ -80,88 +79,48 @@ class eenadu(BasicNewsRecipe):
                 feeds.append((section_title, articles))
         return feeds
 
-    def is_accepted_entry(self, entry):
-        # Those sections in the top nav bar that we will omit
-        # https://www.eenadu.net/nri
-        is_sunday = date.today().weekday() == 6
-        if is_sunday:
-            omit_list = [
-                'net/',
-                'javascript:void(0)',
-                '#',
-                'sports',
-                'movies',
-                'women',
-                'technology',
-                'business',
-                'stories.eenadu.net',
-                'calendar',
-                'devotional',
-                'youth',
-                'recipes',
-                'real-estate',
-                'temples',
-                'kathalu',
-                'viral-videos',
-                'Nri',
-                'videos',
-                'explained',
-                'agriculture',
-                'crime',
-                'health',
-                'photos',
-                'kids-stories',
-                'education',
-            ]
-        else:
-            omit_list = [
-                'net/',
-                'javascript:void(0)',
-                '#',
-                'sports',
-                'movies',
-                'women',
-                'technology',
-                'business',
-                'stories.eenadu.net',
-                'calendar',
-                'devotional',
-                'youth',
-                'recipes',
-                'real-estate',
-                'temples',
-                'kathalu',
-                'viral-videos',
-                'Nri',
-                'videos',
-                'explained',
-                'agriculture',
-                'sunday-magazine',
-                'crime',
-                'health',
-                'photos',
-                'kids-stories',
-                'education',
-            ]
-        is_accepted = True
-        for omit_entry in omit_list:
-            if entry['href'].endswith(omit_entry):
-                is_accepted = False
-                break
-        return is_accepted
+    def articles_from_soup(self, soup):
+        ans = []
+        for link in soup.findAll(
+            attrs={
+                'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']
+            }
+        ):
+            for a in link.findAll('a', attrs={'href': True}):
+                url = a['href']
+                if url.startswith('https') is False:
+                    url = 'https://www.eenadu.net/' + url
+
+                try:
+                    desc = self.tag_to_string(a.find('div')).strip()
+                except Exception:
+                    desc = ''
+
+                for h3 in a.findAll('h3'):
+                    title = self.tag_to_string(h3).strip()
+                    sub = re.escape(title)
+                    desc = re.sub(sub, '', desc).strip()
+
+                    if not title or not url:
+                        continue
+
+                    self.log('\t', title, '\n\t', desc, '\n\t\t', url)
+                    ans.append({'title': title, 'url': url, 'description': desc})
+        return ans
 
     def preprocess_html(self, soup):
         div = soup.find('div', **classes('pub-t'))
         if div:
             date = parse_date(
-                self.tag_to_string(div)
-                .strip().replace('Published : ','').replace('Updated : ','').replace(' IST',':00.000001')
+                self.tag_to_string(div).strip().replace('Published : ', '').replace(
+                    'Updated : ', ''
+                ).replace(' IST', ':00.000001')
             ).replace(tzinfo=None)
             today = datetime.now()
-            if (today - date) > timedelta(1.5):
+            if (today - date) > timedelta(1.15):
                 self.abort_article('Skipping old article')
-        else: # may not be an article.
-            self.abort_article()
+        else:
+            self.abort_article('not an article')
+        for img in soup.findAll('img', attrs={'data-src': True}):
+            img['src'] = img['data-src']
         return soup
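The preprocess_html() change above reshapes the timestamp cleanup and tightens the freshness window from timedelta(1.5) to timedelta(1.15) days (about 27.6 hours). A standalone sketch of the check; the sample 'Published : ... IST' string is an assumed illustration of the site's format, not taken from the site itself:

    from datetime import datetime, timedelta

    from calibre.utils.date import parse_date

    raw = 'Published : 08/08/2022 10:30 IST'
    cleaned = raw.strip().replace('Published : ', '').replace(
        'Updated : ', ''
    ).replace(' IST', ':00.000001')  # append seconds so parse_date accepts it
    pub = parse_date(cleaned).replace(tzinfo=None)
    if (datetime.now() - pub) > timedelta(1.15):
        print('Skipping old article')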
recipes/eenadu_ap.recipe (new file, 126 lines)

@@ -0,0 +1,126 @@
+from calibre.web.feeds.news import BasicNewsRecipe, classes
+from datetime import date, datetime, timedelta
+from calibre.utils.date import parse_date
+import re
+
+
+class eenadu_ap(BasicNewsRecipe):
+    title = 'ఈనాడు - ఆంధ్రప్రదేశ్'
+    __author__ = 'unkn0wn'
+    description = 'THE LARGEST CIRCULATED TELUGU DAILY'
+    language = 'te'
+    use_embedded_content = False
+    remove_javascript = True
+    no_stylesheets = True
+    remove_attributes = ['height', 'width', 'style']
+    ignore_duplicate_articles = {'url', 'title'}
+    masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
+    cover_url = 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/08/03/CAN/5_01/bfff5654_01_mr.jpg'
+    encoding = 'utf-8'
+    remove_empty_feeds = True
+    extra_css = '.pub-t{font-size:small; font-style:italic;}'
+
+    keep_only_tags = [
+        dict(name='h1'),
+        dict(**classes('pub-t')),
+        classes('fullstory text-justify contlist-cont'),
+        dict(name='span', attrs={'id': 'PDSAIApbreak'}),
+    ]
+
+    remove_tags = [
+        dict(name='span', attrs={'style': 'float:left; margin-right:10px;'}),
+        dict(
+            name='p',
+            attrs={
+                'style':
+                'font-size: 18px !important; margin: 0px; margin-top: -15px; text-align: center;flex: 1;'
+            }
+        ),
+        dict(name='aside', attrs={'class': lambda x: x and x.startswith('thumb')}),
+        dict(name='br'),
+        classes('sshare-c tags andbeyond_ad fnt20 arti more2 offset-tb1 msb-list')
+    ]
+
+    def parse_index(self):
+        section_list = [
+            ('ఆంధ్రప్రదేశ్ తాజా వార్తలు', 'andhra-pradesh'),
+            ('సంపాదకీయం', 'andhra-pradesh/editorial'),
+            ('ఆంధ్రప్రదేశ్ ప్రధానాంశాలు', 'andhra-pradesh/top-news'),
+            ('ఆంధ్రప్రదేశ్ జిల్లా వార్తలు', 'andhra-pradesh/districts'),
+            # ('క్రైమ్', 'crime'),
+            ('పాలిటిక్స్', 'politics'),
+            ('జాతీయం', 'india'),
+            ('బిజినెస్', 'business'),
+            ('అంతర్జాతీయం', 'world'),
+            ('క్రీడలు', 'sports'),
+            # ('సినిమా', 'movies'),
+            # ('చదువు', 'education'),
+            # ('సుఖీభవ', 'health'),
+            # ('ఈ-నాడు', 'technology'),
+            # ('మకరందం', 'devotional'),
+            # ('ఈ తరం', 'youth'),
+            # ('ఆహా', 'recipes'),
+            # ('హాయ్ బుజ్జీ', 'kids-stories'),
+            # ('స్థిరాస్తి', 'real-estate'),
+        ]
+        is_sunday = date.today().weekday() == 6
+        if is_sunday:
+            section_list.append(('సండే మ్యాగజైన్', 'sunday-magazine'))
+        feeds = []
+
+        # For each section title, fetch the article urls
+        for section in section_list:
+            section_title = section[0]
+            section_url = 'https://www.eenadu.net/' + section[1]
+            self.log(section_title, section_url)
+            soup = self.index_to_soup(section_url)
+            articles = self.articles_from_soup(soup)
+            if articles:
+                feeds.append((section_title, articles))
+        return feeds
+
+    def articles_from_soup(self, soup):
+        ans = []
+        for link in soup.findAll(
+            attrs={
+                'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']
+            }
+        ):
+            for a in link.findAll('a', attrs={'href': True}):
+                url = a['href']
+                if url.startswith('https') is False:
+                    url = 'https://www.eenadu.net/' + url
+
+                try:
+                    desc = self.tag_to_string(a.find('div')).strip()
+                except Exception:
+                    desc = ''
+
+                for h3 in a.findAll('h3'):
+                    title = self.tag_to_string(h3).strip()
+                    sub = re.escape(title)
+                    desc = re.sub(sub, '', desc).strip()
+
+                    if not title or not url:
+                        continue
+
+                    self.log('\t', title, '\n\t', desc, '\n\t\t', url)
+                    ans.append({'title': title, 'url': url, 'description': desc})
+        return ans
+
+    def preprocess_html(self, soup):
+        div = soup.find('div', **classes('pub-t'))
+        if div:
+            date = parse_date(
+                self.tag_to_string(div).strip().replace('Published : ', '').replace(
+                    'Updated : ', ''
+                ).replace(' IST', ':00.000001')
+            ).replace(tzinfo=None)
+            today = datetime.now()
+            if (today - date) > timedelta(1.15):
+                self.abort_article('Skipping old article')
+        else:
+            self.abort_article('not an article')
+        for img in soup.findAll('img', attrs={'data-src': True}):
+            img['src'] = img['data-src']
+        return soup
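Both recipes end preprocess_html() by promoting lazy-load URLs: the site keeps the real image source in data-src while src holds a placeholder, so the real URL is copied into src before conversion. A minimal sketch with a made-up snippet, using calibre's bundled BeautifulSoup wrapper:

    from calibre.ebooks.BeautifulSoup import BeautifulSoup

    html = '<img src="placeholder.gif" data-src="https://example.com/pic.jpg">'
    soup = BeautifulSoup(html)
    for img in soup.findAll('img', attrs={'data-src': True}):
        img['src'] = img['data-src']  # point src at the real image
    print(soup)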