Update eenadu
parent 75d111e3f6
commit 8cc78aba7a
@@ -1,5 +1,9 @@
-from urllib.parse import quote
+import json
+import re
+from datetime import date, datetime, timedelta
 
+from urllib.parse import quote
+from calibre.utils.date import parse_date
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -14,10 +18,6 @@ class eenadu_ts(BasicNewsRecipe):
     masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
     remove_attributes = ['style', 'height', 'width']
     ignore_duplicate_articles = {'url', 'title'}
-    reverse_article_order = True
-    remove_empty_feeds = True
-    simultaneous_downloads = 1
-    art_url = ''
 
     extra_css = '''
         img {display:block; margin:0 auto;}
@@ -28,22 +28,6 @@ class eenadu_ts(BasicNewsRecipe):
     keep_only_tags = [classes('bookWrapper fullstory')]
     remove_tags = [classes('ext-link offset-tb1 sshare-c')]
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        soup = self.index_to_soup(url)
-        link = soup.a['href']
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/multimedia/', 'marathi', 'hindi', 'bangla'
-        ]
-        if any(x in link for x in skip_sections):
-            self.abort_article('skipping video links')
-        self.log('Found ', link)
-        self.art_url = link
-        html = br.open(link).read()
-        return ({ 'data': html, 'url': link })
-
     resolve_internal_links = True
     remove_empty_feeds = True
 
@@ -55,8 +39,6 @@ class eenadu_ts(BasicNewsRecipe):
         return br
 
     def get_cover_url(self):
-        import json
-        from datetime import date
         today = quote(date.today().strftime('%d/%m/%Y'), safe='')
         raw = self.index_to_soup(
             'https://epaper.eenadu.net/Home/GetAllpages?editionid=1&editiondate=' + today, raw=True
@@ -65,66 +47,71 @@ class eenadu_ts(BasicNewsRecipe):
             if cov['NewsProPageTitle'].lower().startswith('front'):
                 return cov['HighResolution']
 
-    feeds = []
-    when = '27' # hours
-    index = 'https://www.eenadu.net'
-    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=te-IN&gl=IN&ceid=IN:te'
-
-    news = index + '/telugu-news/'
-    news_list = [
-        ('తెలంగాణ ప్రధానాంశాలు', 'ts-top-news'),
-        ('సంపాదకీయం', 'editorial'),
-        ('వ్యాఖ్యానం', 'vyakyanam'),
-        ('హైదరాబాద్ జిల్లా వార్తలు', 'districts/Hyderabad'),
-        ('క్రైమ్', 'crime'),
-        ('పాలిటిక్స్', 'politics'),
-        ('జాతీయం', 'india'),
-        ('బిజినెస్', 'business'),
-        ('అంతర్జాతీయం', 'world'),
-        ('క్రీడలు', 'sports'),
-        ('సినిమా', 'movies'),
-        ('వసుంధర', 'women'),
-        ('ఈ-నాడు', 'technology'),
-        ('వెబ్ ప్రత్యేకం', 'explained')
-    ]
-    for n in news_list:
-        news_index = news + n[1] + '/'
-        feeds.append((n[0], a.format(when, quote(news_index, safe=''))))
-    feeds.append(('Other News', a.format(when, quote(news, safe=''))))
-
-    art = index + '/telugu-article/'
-    art_list = [
-        ('చదువు', 'education'),
-        ('సుఖీభవ', 'health'),
-        ('ఆహా', 'recipes'),
-        ('హాయ్ బుజ్జీ', 'kids-stories'),
-        ('మకరందం', 'devotional'),
-        ('దేవతార్చన', 'temples'),
-        ('స్థిరాస్తి', 'real-estate'),
-        ('కథామృతం', 'kathalu'),
-        ('సండే మ్యాగజైన్', 'sunday-magazine')
-    ]
-    for x in art_list:
-        art_index = art + x[1] + '/'
-        feeds.append((x[0], a.format(when, quote(art_index, safe=''))))
-    feeds.append(('Other Articles', a.format(when, quote(art, safe=''))))
-
-    feeds.append(('ఇతరులు', a.format(when, quote(index, safe=''))))
-    feeds.append(('ప్రతిభ', a.format(when, 'https://pratibha.eenadu.net/')))
+    def parse_index(self):
+        inx = 'https://www.eenadu.net/'
+        section_list = [
+            ('తెలంగాణ', 'telangana'),
+            ('పాలిటిక్స్', 'politics'),
+            ('జాతీయం', 'india'),
+            ('సంపాదకీయం', 'editorial'),
+            ('బిజినెస్', 'business'),
+            # ('క్రైమ్', 'crime'),
+            ('అంతర్జాతీయం', 'world'),
+            ('క్రీడలు', 'sports'),
+            ('సినిమా', 'movies'),
+            ('వసుంధర', 'women'),
+            ('హైదరాబాద్ జిల్లా వార్తలు', 'telangana/districts/hyderabad'),
+        ]
+        feeds = []
+
+        # For each section title, fetch the article urls
+        for section in section_list:
+            section_title = section[0]
+            section_url = section[1]
+            self.log(section_title)
+            soup = self.index_to_soup(inx + section_url)
+            articles = self.articles_from_soup(soup)
+            if articles:
+                feeds.append((section_title, articles))
+        return feeds
+
+    def articles_from_soup(self, soup):
+        div = soup.find('div', attrs={'class':['col-left', 'district-more']})
+        ans = []
+        for link in div.findAll(attrs={'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']}):
+            for a in link.findAll('a', attrs={'href': True}):
+                url = a['href']
+                if not url.startswith('http'):
+                    url = 'https://www.eenadu.net/' + url
+                h = a.find(['h4', 'h3', 'h2', 'h1'])
+                if h:
+                    title = self.tag_to_string(h).strip()
+                    if 'telugu-news' not in url:
+                        continue
+                    self.log('\t', title, '\n\t\t', url)
+                    ans.append({'title': title, 'url': url})
+        return ans
 
     def populate_article_metadata(self, article, soup, first):
-        article.url = self.art_url
-        article.title = article.title.replace(' - Eenadu', '')
         desc = soup.find(attrs={'class':'srtdes'})
         if desc:
             article.summary = self.tag_to_string(desc)
             article.text_summary = article.summary
 
     def preprocess_raw_html(self, raw, *a):
-        import re
         if '<!--Top Full Story Start -->' in raw:
             body = re.search(r'<!--Top Full Story Start -->([^~]+?)<!--Tags Start -->', raw)
             return '<html><body><div>' + body.group(1) + '</div></body></html>'
         return raw
 
+    def preprocess_html(self, soup):
+        div = soup.find(**classes('pub-t'))
+        if div:
+            dt = re.search(r'\d+.+\d+', self.tag_to_string(div))
+            date = parse_date(dt.group(0) + ':00.000000').replace(tzinfo=None)
+            if (datetime.now() - date) > timedelta(1.5):
+                self.abort_article('Skipping old article')
+        else:
+            self.abort_article('may not be an article')
+        return soup
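
The diff below applies the same change to the eenadu_ap recipe. For context on the API both recipes now rely on: in calibre, BasicNewsRecipe.parse_index() must return a list of (section title, articles) pairs, where each article is a dict with at least 'title' and 'url' keys ('description' and 'date' are optional). A minimal sketch of that return shape, using an illustrative headline and URL rather than values taken from the site:

    # Illustrative only: the headline and URL below are placeholders.
    [
        ('తెలంగాణ', [
            {'title': 'Example headline', 'url': 'https://www.eenadu.net/telugu-news/example'},
        ]),
    ]
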
@@ -1,5 +1,9 @@
-from urllib.parse import quote
+import json
+import re
+from datetime import date, datetime, timedelta
 
+from urllib.parse import quote
+from calibre.utils.date import parse_date
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -14,10 +18,6 @@ class eenadu_ap(BasicNewsRecipe):
     masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
     remove_attributes = ['style', 'height', 'width']
     ignore_duplicate_articles = {'url', 'title'}
-    reverse_article_order = True
-    remove_empty_feeds = True
-    simultaneous_downloads = 1
-    art_url = ''
 
     extra_css = '''
         img {display:block; margin:0 auto;}
@@ -28,22 +28,6 @@ class eenadu_ap(BasicNewsRecipe):
     keep_only_tags = [classes('bookWrapper fullstory')]
     remove_tags = [classes('ext-link offset-tb1 sshare-c')]
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        soup = self.index_to_soup(url)
-        link = soup.a['href']
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/multimedia/', 'marathi', 'hindi', 'bangla'
-        ]
-        if any(x in link for x in skip_sections):
-            self.abort_article('skipping video links')
-        self.log('Found ', link)
-        self.art_url = link
-        html = br.open(link).read()
-        return ({ 'data': html, 'url': link })
-
     resolve_internal_links = True
     remove_empty_feeds = True
 
@@ -65,66 +49,71 @@ class eenadu_ap(BasicNewsRecipe):
             if cov['NewsProPageTitle'].lower().startswith('front'):
                 return cov['HighResolution']
 
-    feeds = []
-    when = '27' # hours
-    index = 'https://www.eenadu.net'
-    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=te-IN&gl=IN&ceid=IN:te'
-
-    news = index + '/telugu-news/'
-    news_list = [
-        ('ఆంధ్రప్రదేశ్ ప్రధానాంశాలు', 'ap-top-news'),
-        ('సంపాదకీయం', 'editorial'),
-        ('వ్యాఖ్యానం', 'vyakyanam'),
-        ('విశాఖపట్నం జిల్లా వార్తలు', 'districts/Visakhapatnam'),
-        ('క్రైమ్', 'crime'),
-        ('పాలిటిక్స్', 'politics'),
-        ('జాతీయం', 'india'),
-        ('బిజినెస్', 'business'),
-        ('అంతర్జాతీయం', 'world'),
-        ('క్రీడలు', 'sports'),
-        ('సినిమా', 'movies'),
-        ('వసుంధర', 'women'),
-        ('ఈ-నాడు', 'technology'),
-        ('వెబ్ ప్రత్యేకం', 'explained')
-    ]
-    for n in news_list:
-        news_index = news + n[1] + '/'
-        feeds.append((n[0], a.format(when, quote(news_index, safe=''))))
-    feeds.append(('Other News', a.format(when, quote(news, safe=''))))
-
-    art = index + '/telugu-article/'
-    art_list = [
-        ('చదువు', 'education'),
-        ('సుఖీభవ', 'health'),
-        ('ఆహా', 'recipes'),
-        ('హాయ్ బుజ్జీ', 'kids-stories'),
-        ('మకరందం', 'devotional'),
-        ('దేవతార్చన', 'temples'),
-        ('స్థిరాస్తి', 'real-estate'),
-        ('కథామృతం', 'kathalu'),
-        ('సండే మ్యాగజైన్', 'sunday-magazine')
-    ]
-    for x in art_list:
-        art_index = art + x[1] + '/'
-        feeds.append((x[0], a.format(when, quote(art_index, safe=''))))
-    feeds.append(('Other Articles', a.format(when, quote(art, safe=''))))
-
-    feeds.append(('ఇతరులు', a.format(when, quote(index, safe=''))))
-    feeds.append(('ప్రతిభ', a.format(when, 'https://pratibha.eenadu.net/')))
+    def parse_index(self):
+        inx = 'https://www.eenadu.net/'
+        section_list = [
+            ('ఆంధ్రప్రదేశ్', 'andhra-pradesh'),
+            ('పాలిటిక్స్', 'politics'),
+            ('జాతీయం', 'india'),
+            ('సంపాదకీయం', 'editorial'),
+            ('బిజినెస్', 'business'),
+            # ('క్రైమ్', 'crime'),
+            ('అంతర్జాతీయం', 'world'),
+            ('క్రీడలు', 'sports'),
+            ('సినిమా', 'movies'),
+            ('వసుంధర', 'women'),
+            ('విశాఖపట్నం జిల్లా వార్తలు', 'andhra-pradesh/districts/visakhapatnam'),
+        ]
+        feeds = []
+
+        # For each section title, fetch the article urls
+        for section in section_list:
+            section_title = section[0]
+            section_url = section[1]
+            self.log(section_title)
+            soup = self.index_to_soup(inx + section_url)
+            articles = self.articles_from_soup(soup)
+            if articles:
+                feeds.append((section_title, articles))
+        return feeds
+
+    def articles_from_soup(self, soup):
+        div = soup.find('div', attrs={'class':['col-left', 'district-more']})
+        ans = []
+        for link in div.findAll(attrs={'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']}):
+            for a in link.findAll('a', attrs={'href': True}):
+                url = a['href']
+                if not url.startswith('http'):
+                    url = 'https://www.eenadu.net/' + url
+                h = a.find(['h4', 'h3', 'h2', 'h1'])
+                if h:
+                    title = self.tag_to_string(h).strip()
+                    if 'telugu-news' not in url:
+                        continue
+                    self.log('\t', title, '\n\t\t', url)
+                    ans.append({'title': title, 'url': url})
+        return ans
 
     def populate_article_metadata(self, article, soup, first):
-        article.url = self.art_url
-        article.title = article.title.replace(' - Eenadu', '')
         desc = soup.find(attrs={'class':'srtdes'})
         if desc:
             article.summary = self.tag_to_string(desc)
             article.text_summary = article.summary
 
     def preprocess_raw_html(self, raw, *a):
-        import re
         if '<!--Top Full Story Start -->' in raw:
             body = re.search(r'<!--Top Full Story Start -->([^~]+?)<!--Tags Start -->', raw)
             return '<html><body><div>' + body.group(1) + '</div></body></html>'
         return raw
 
+    def preprocess_html(self, soup):
+        div = soup.find(**classes('pub-t'))
+        if div:
+            dt = re.search(r'\d+.+\d+', self.tag_to_string(div))
+            date = parse_date(dt.group(0) + ':00.000000').replace(tzinfo=None)
+            if (datetime.now() - date) > timedelta(1.5):
+                self.abort_article('Skipping old article')
+        else:
+            self.abort_article('may not be an article')
+        return soup
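
One detail worth noting in the preprocess_html added to both recipes: timedelta's first positional argument is days, so timedelta(1.5) corresponds to 36 hours, and articles whose parsed publication time is older than that window are aborted. A quick illustration:

    from datetime import timedelta
    # The first positional argument of timedelta is days, so 1.5 days == 36 hours.
    assert timedelta(1.5) == timedelta(hours=36)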