Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit 3c9dc1bcd9: Merge branch 'master' of https://github.com/unkn0w7n/calibre

The merge rewrites the two Eenadu recipes (Telangana and Andhra Pradesh editions) and the ORF recipe to build their feeds from Google News RSS queries, and hardens image-caption handling in three recipes that share a parse_inline helper.
Changed: Eenadu Telangana recipe (class eenadu_ts)

@@ -1,7 +1,4 @@
-import re
-from datetime import date, datetime, timedelta
-
-from calibre.utils.date import parse_date
+from urllib.parse import quote
+
 from calibre.web.feeds.news import BasicNewsRecipe, classes

@@ -10,118 +7,116 @@ class eenadu_ts(BasicNewsRecipe):
     __author__ = 'unkn0wn'
     description = 'THE LARGEST CIRCULATED TELUGU DAILY'
     language = 'te'
-    use_embedded_content = False
-    remove_javascript = True
-    no_stylesheets = True
-    remove_attributes = ['height', 'width', 'style']
-    ignore_duplicate_articles = {'url', 'title'}
-    masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
-    cover_url = 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/08/08/GTH/5_01/d5041804_01_mr.jpg'
     encoding = 'utf-8'
+    no_stylesheets = True
+    remove_javascript = True
+    masthead_url = 'https://dxxd96tbpm203.cloudfront.net//img/logo.png'
+    remove_attributes = ['style', 'height', 'width']
+    ignore_duplicate_articles = {'url', 'title'}
+    reverse_article_order = True
     remove_empty_feeds = True
-    extra_css = '.pub-t{font-size:small; font-style:italic;}'
+    simultaneous_downloads = 1
+    art_url = ''
 
-    keep_only_tags = [
-        dict(name='h1'),
-        dict(**classes('pub-t')),
-        classes('fullstory text-justify contlist-cont'),
-        dict(name='span', attrs={'id': 'PDSAIApbreak'}),
-    ]
+    extra_css = '''
+        img {display:block; margin:0 auto;}
+        blockquote, em {color:#202020;}
+        .pub-t{font-size:small; font-style:italic;}
+    '''
 
-    remove_tags = [
-        dict(name='span', attrs={'style': 'float:left; margin-right:10px;'}),
-        dict(
-            name='p',
-            attrs={
-                'style':
-                'font-size: 18px !important; margin: 0px; margin-top: -15px; text-align: center;flex: 1;'
-            }
-        ),
-        dict(name='aside', attrs={'class': lambda x: x and x.startswith('thumb')}),
-        dict(name='br'),
-        classes('sshare-c tags andbeyond_ad fnt20 arti more2 offset-tb1 msb-list')
-    ]
+    keep_only_tags = [classes('bookWrapper fullstory')]
+    remove_tags = [classes('ext-link offset-tb1 sshare-c')]
 
-    def parse_index(self):
-        section_list = [
-            ('తెలంగాణ తాజా వార్తలు', 'telangana'),
-            ('సంపాదకీయం', 'telangana/editorial'),
-            ('తెలంగాణ ప్రధానాంశాలు', 'telangana/top-news'),
-            ('తెలంగాణ జిల్లా వార్తలు', 'telangana/districts'),
-            # ('క్రైమ్', 'crime'),
-            ('పాలిటిక్స్', 'politics'),
-            ('జాతీయం', 'india'),
-            ('బిజినెస్', 'business'),
-            ('అంతర్జాతీయం', 'world'),
-            ('క్రీడలు', 'sports'),
-            # ('సినిమా', 'movies'),
-            # ('చదువు', 'education'),
-            # ('సుఖీభవ', 'health'),
-            # ('ఈ-నాడు', 'technology'),
-            # ('మకరందం', 'devotional'),
-            # ('ఈ తరం', 'youth'),
-            # ('ఆహా', 'recipes'),
-            # ('హాయ్ బుజ్జీ', 'kids-stories'),
-            # ('స్థిరాస్తి', 'real-estate'),
-        ]
-        is_sunday = date.today().weekday() == 6
-        if is_sunday:
-            section_list.append(('సండే మ్యాగజైన్', 'sunday-magazine'))
-        feeds = []
-
-        # For each section title, fetch the article urls
-        for section in section_list:
-            section_title = section[0]
-            section_url = 'https://www.eenadu.net/' + section[1]
-            self.log(section_title, section_url)
-            soup = self.index_to_soup(section_url)
-            articles = self.articles_from_soup(soup)
-            if articles:
-                feeds.append((section_title, articles))
-        return feeds
-
-    def articles_from_soup(self, soup):
-        ans = []
-        for link in soup.findAll(
-            attrs={
-                'class': ['telugu_uni_body', 'thumb-content-more', 'owl-carousel']
-            }
-        ):
-            for a in link.findAll('a', attrs={'href': True}):
-                url = a['href']
-                if url.startswith('https') is False:
-                    url = 'https://www.eenadu.net/' + url
-
-                try:
-                    desc = self.tag_to_string(a.find('div')).strip()
-                except Exception:
-                    desc = ''
-
-                for h3 in a.findAll('h3'):
-                    title = self.tag_to_string(h3).strip()
-                    sub = re.escape(title)
-                    desc = re.sub(sub, '', desc).strip()
-
-                    if not title or not url:
-                        continue
-
-                    self.log('\t', title, '\n\t', desc, '\n\t\t', url)
-                    ans.append({'title': title, 'url': url, 'description': desc})
-        return ans
-
-    def preprocess_html(self, soup):
-        div = soup.find('div', **classes('pub-t'))
-        if div:
-            date = parse_date(
-                self.tag_to_string(div).strip().replace('Published : ', '').replace(
-                    'Updated : ', ''
-                ).replace(' IST', ':00.000001')
-            ).replace(tzinfo=None)
-            today = datetime.now()
-            if (today - date) > timedelta(1.15):
-                self.abort_article('Skipping old article')
-        else:
-            self.abort_article('not an article')
-        for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src']
-        return soup
+    articles_are_obfuscated = True
+
+    def get_obfuscated_article(self, url):
+        br = self.get_browser()
+        soup = self.index_to_soup(url)
+        link = soup.a['href']
+        skip_sections = [  # add sections you want to skip
+            '/video/', '/videos/', '/multimedia/', 'marathi', 'hindi', 'bangla'
+        ]
+        if any(x in link for x in skip_sections):
+            self.abort_article('skipping video links')
+        self.log('Found ', link)
+        self.art_url = link
+        html = br.open(link).read()
+        return {'data': html, 'url': link}
+
+    resolve_internal_links = True
+    remove_empty_feeds = True
+
+    def get_cover_url(self):
+        import json
+        from datetime import date
+        today = quote(date.today().strftime('%d/%m/%Y'), safe='')
+        raw = self.index_to_soup(
+            'https://epaper.eenadu.net/Home/GetAllpages?editionid=1&editiondate=' + today, raw=True
+        )
+        for cov in json.loads(raw):
+            if cov['NewsProPageTitle'].lower().startswith('front'):
+                return cov['HighResolution']
+
+    feeds = []
+
+    when = '27'  # hours
+    index = 'https://www.eenadu.net'
+    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=te-IN&gl=IN&ceid=IN:te'
+
+    news = index + '/telugu-news/'
+    news_list = [
+        ('తెలంగాణ ప్రధానాంశాలు', 'ts-top-news'),
+        ('సంపాదకీయం', 'editorial'),
+        ('వ్యాఖ్యానం', 'vyakyanam'),
+        ('హైదరాబాద్ జిల్లా వార్తలు', 'districts/Hyderabad'),
+        ('క్రైమ్', 'crime'),
+        ('పాలిటిక్స్', 'politics'),
+        ('జాతీయం', 'india'),
+        ('బిజినెస్', 'business'),
+        ('అంతర్జాతీయం', 'world'),
+        ('క్రీడలు', 'sports'),
+        ('సినిమా', 'movies'),
+        ('వసుంధర', 'women'),
+        ('ఈ-నాడు', 'technology'),
+        ('వెబ్ ప్రత్యేకం', 'explained')
+    ]
+    for n in news_list:
+        news_index = news + n[1] + '/'
+        feeds.append((n[0], a.format(when, quote(news_index, safe=''))))
+    feeds.append(('Other News', a.format(when, quote(news, safe=''))))
+
+    art = index + '/telugu-article/'
+    art_list = [
+        ('చదువు', 'education'),
+        ('సుఖీభవ', 'health'),
+        ('ఆహా', 'recipes'),
+        ('హాయ్ బుజ్జీ', 'kids-stories'),
+        ('మకరందం', 'devotional'),
+        ('దేవతార్చన', 'temples'),
+        ('స్థిరాస్తి', 'real-estate'),
+        ('కథామృతం', 'kathalu'),
+        ('సండే మ్యాగజైన్', 'sunday-magazine')
+    ]
+    for x in art_list:
+        art_index = art + x[1] + '/'
+        feeds.append((x[0], a.format(when, quote(art_index, safe=''))))
+    feeds.append(('Other Articles', a.format(when, quote(art, safe=''))))
+
+    feeds.append(('ఇతరులు', a.format(when, quote(index, safe=''))))
+    feeds.append(('ప్రతిభ', a.format(when, 'https://pratibha.eenadu.net/')))
+
+    def populate_article_metadata(self, article, soup, first):
+        article.url = self.art_url
+        article.title = article.title.replace(' - Eenadu', '')
+        desc = soup.find(attrs={'class': 'srtdes'})
+        if desc:
+            article.summary = self.tag_to_string(desc)
+            article.text_summary = article.summary
+
+    def preprocess_raw_html(self, raw, *a):
+        import re
+        if '<!--Top Full Story Start -->' in raw:
+            body = re.search(r'<!--Top Full Story Start -->([^~]+?)<!--Tags Start -->', raw)
+            return '<html><body><div>' + body.group(1) + '</div></body></html>'
+        return raw
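Worth pausing on the mechanism the new recipe relies on: every feed is now a Google News RSS search scoped to one Eenadu section index. A minimal sketch of how one template expands, using only values that appear in the diff above (the print is purely illustrative):

    # How the recipe's RSS template expands for the ts-top-news section.
    # 'when:27h' limits results to the last 27 hours; 'allinurl:' restricts
    # matches to URLs under the percent-encoded section index.
    from urllib.parse import quote

    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=te-IN&gl=IN&ceid=IN:te'
    news_index = 'https://www.eenadu.net/telugu-news/ts-top-news/'
    print(a.format('27', quote(news_index, safe='')))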
Changed: Eenadu Andhra Pradesh recipe (class eenadu_ap)

@@ -1,7 +1,4 @@
(the same import change as in eenadu_ts above: the re, datetime and parse_date imports are replaced by urllib.parse.quote)

@@ -10,137 +7,116 @@ class eenadu_ap(BasicNewsRecipe):

The class body receives the same rewrite as eenadu_ts; only the edition-specific pieces differ. The old static cover pointed at a different page image:

-    cover_url = 'https://d66zsp32hue2v.cloudfront.net/Eenadu/2022/08/03/CAN/5_01/bfff5654_01_mr.jpg'

This recipe's old kiosko.net cover fallback is dropped in favour of the same e-paper lookup, with editionid=2 for the AP edition:

-    def get_cover_url(self):
-        from datetime import date
-        cover = 'https://img.kiosko.net/' + str(
-            date.today().year
-        ) + '/' + date.today().strftime('%m') + '/' + date.today(
-        ).strftime('%d') + '/in/eenadu.750.jpg'
-        br = BasicNewsRecipe.get_browser(self, verify_ssl_certificates=False)
-        try:
-            br.open(cover)
-        except:
-            index = 'https://es.kiosko.net/in/np/eenadu.html'
-            soup = self.index_to_soup(index)
-            for image in soup.findAll('img', src=True):
-                if image['src'].endswith('750.jpg'):
-                    return 'https:' + image['src']
-            self.log("\nCover unavailable")
-            cover = None
-        return cover
+    def get_cover_url(self):
+        import json
+        from datetime import date
+        today = quote(date.today().strftime('%d/%m/%Y'), safe='')
+        raw = self.index_to_soup(
+            'https://epaper.eenadu.net/Home/GetAllpages?editionid=2&editiondate=' + today, raw=True
+        )
+        for cov in json.loads(raw):
+            if cov['NewsProPageTitle'].lower().startswith('front'):
+                return cov['HighResolution']

The removed parse_index scraped AP section paths (the commented-out tail of the list matched the eenadu_ts one):

-        section_list = [
-            ('ఆంధ్రప్రదేశ్ తాజా వార్తలు', 'andhra-pradesh'),
-            ('సంపాదకీయం', 'andhra-pradesh/editorial'),
-            ('ఆంధ్రప్రదేశ్ ప్రధానాంశాలు', 'andhra-pradesh/top-news'),
-            ('ఆంధ్రప్రదేశ్ జిల్లా వార్తలు', 'andhra-pradesh/districts'),
-            # ('క్రైమ్', 'crime'),

and the new Google News news_list swaps in AP entries where the TS recipe has Telangana ones:

+        ('ఆంధ్రప్రదేశ్ ప్రధానాంశాలు', 'ap-top-news'),
+        ('విశాఖపట్నం జిల్లా వార్తలు', 'districts/Visakhapatnam'),

Everything else (keep_only_tags, remove_tags, get_obfuscated_article, the feed-building loops, populate_article_metadata, preprocess_raw_html) matches the eenadu_ts diff above line for line.
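Both editions now fetch their cover from the e-paper backend rather than scraping kiosko.net. A self-contained sketch of that lookup, using urllib in place of the recipe's browser (an assumption made for illustration; the endpoint, editionid values and JSON keys are those in the diffs, editionid=1 being Telangana and 2 Andhra Pradesh):

    # GetAllpages returns a JSON list of page objects; pick the page whose
    # 'NewsProPageTitle' starts with 'front' and use its high-res image.
    import json
    from datetime import date
    from urllib.parse import quote
    from urllib.request import urlopen

    today = quote(date.today().strftime('%d/%m/%Y'), safe='')
    url = 'https://epaper.eenadu.net/Home/GetAllpages?editionid=2&editiondate=' + today
    pages = json.loads(urlopen(url).read())
    front = next((p['HighResolution'] for p in pages
                  if p['NewsProPageTitle'].lower().startswith('front')), None)
    print(front)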
Changed: parse_inline image-caption handling (the same fix, applied in three recipes touched by this commit)

@@ -54,7 +54,7 @@ def parse_inline(inl):
             yield '<div class="img"><img src="{}"></div>'.format(props['image']['src'])
             if 'caption' in props:
                 yield '<div class="cap">{}<span class="cred">{}</span></div>'.format(
-                    props['caption']['text'], ' ' + props['caption']['credit']
+                    props['caption'].get('text', ''), ' ' + props['caption'].get('credit', '')
                 )
         yield '</p>'
     if inl.get('content', {}).get('name', '') == 'ImageGroup':

@@ -65,7 +65,7 @@ def parse_inline(inl):
             yield '<div class="img"><img src="{}"></div>'.format(imgs['src'])
             if 'caption' in imgs:
                 yield '<div class="cap">{}<span class="cred">{}</span></div>'.format(
-                    imgs['caption']['text'], ' ' + imgs['caption']['credit']
+                    imgs['caption'].get('text', ''), ' ' + imgs['caption'].get('credit', '')
                 )
         yield '</p>'

The identical pair of hunks recurs, shifted a few lines, in the other two recipes that share this helper (at @@ -53,7 +53,7 @@ / @@ -64,7 +64,7 @@ and @@ -58,7 +58,7 @@ / @@ -69,7 +69,7 @@).
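The fix is the standard defensive-dict pattern: a caption object in the feed JSON may omit 'text' or 'credit', and plain indexing would abort the article render with a KeyError. A toy illustration (made-up data, not the actual feed):

    # Caption missing 'credit': indexing would raise KeyError, while .get()
    # degrades gracefully to an empty credit span.
    props = {'caption': {'text': 'A file photo'}}

    cap = props['caption']
    html = '<div class="cap">{}<span class="cred">{}</span></div>'.format(
        cap.get('text', ''), ' ' + cap.get('credit', ''))
    print(html)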
Changed: ORF recipe (Observer Research Foundation)

@@ -1,8 +1,9 @@
+from urllib.parse import quote
+
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
 class ORF(BasicNewsRecipe):
-    title = u'Observer Research Foundation'
+    title = 'Observer Research Foundation'
     description = (
         'Set up in 1990, ORF seeks to lead and aid policy thinking towards building a strong and prosperous India'
         ' in a fair and equitable world. It helps discover and inform India’s choices, and carries Indian voices '

@@ -10,52 +11,75 @@ class ORF(BasicNewsRecipe):
     )
     language = 'en_IN'
     __author__ = 'unkn0wn'
-    oldest_article = 7.5  # days
-    max_articles_per_feed = 25
     encoding = 'utf-8'
+    no_stylesheets = True
+    remove_javascript = True
     masthead_url = 'https://www.orfonline.org/wp-content/uploads/2015/09/Logo_ORF_JPEG.jpg'
     remove_attributes = ['style', 'height', 'width']
-    ignore_duplicate_articles = {'url'}
+    ignore_duplicate_articles = {'url', 'title'}
+    reverse_article_order = True
+    remove_empty_feeds = True
+    simultaneous_downloads = 1
+    art_url = ''
 
     extra_css = '''
-        .report-slider {font-size:small; color:#404040;}
+        img {display:block; margin:0 auto;}
+        .report-slider, .author_panel {font-size:small; color:#404040;}
         .report {font-size:small; font-weight:bold;}
         .excert-italic, .recent-block-people {font-style:italic; color:#202020;}
         blockquote, em {color:#202020;}
+        .espert_speak_panel {font-size:small;}
     '''
 
+    articles_are_obfuscated = True
+
+    def get_obfuscated_article(self, url):
+        br = self.get_browser()
+        soup = self.index_to_soup(url)
+        link = soup.a['href']
+        skip_sections = [  # add sections you want to skip
+            '/video/', '/videos/', '/multimedia/', 'marathi', 'hindi', 'bangla'
+        ]
+        if any(x in link for x in skip_sections):
+            self.abort_article('skipping video links ' + link)
+        self.log('Found ', link)
+        self.art_url = link
+        html = br.open(link).read()
+        return {'data': html, 'url': link}
+
     def get_browser(self):
         return BasicNewsRecipe.get_browser(self, user_agent='common_words/based')
 
     resolve_internal_links = True
     remove_empty_feeds = True
 
-    keep_only_tags = [classes('recent-updates-block recent-block-people')]
+    keep_only_tags = [
+        dict(name='h1'),
+        classes('author_panel espert_speak_panel expert_panel_content')
+    ]
     remove_tags = [
         classes(
             'social socialshare comment-area-section telegramhtml post-tag '
-            'research-prev research-next'
+            'research-prev research-next col-md-4 button_group sharethis-p tags'
         )
     ]
 
-    feeds = [
-        ('Commentaries', 'https://www.orfonline.org/content-type/commentary/feed/'),
-        ('Expert Speak', 'https://www.orfonline.org/expert-speak/feed/'),
-        ('Books and Monographs', 'https://www.orfonline.org/content-type/books/feed/'),
-        ('Event Reports', 'https://www.orfonline.org/content-type/event-reports/feed/'),
-        ('Events', 'https://www.orfonline.org/content-type/events/feed/'),
-        ('Forums', 'https://www.orfonline.org/content-type/forums/feed/'),
-        ('GP-ORF Series', 'https://www.orfonline.org/content-type/gp-orf-series/feed/'),
-        ('Issue Briefs & Special Reports', 'https://www.orfonline.org/content-type/issue-brief/feed/'),
-        ('Monitors', 'https://www.orfonline.org/content-type/monitors/feed/'),
-        ('Occasional Papers', 'https://www.orfonline.org/content-type/occasional-paper/feed/'),
-        ('Primer', 'https://www.orfonline.org/content-type/primer/feed/'),
-        ('Series', 'https://www.orfonline.org/content-type/series/feed/'),
-        ('Surveys & Polls', 'https://www.orfonline.org/content-type/surveys-polls/feed/'),
-        ('Young Voices', 'https://www.orfonline.org/content-type/young-voices/feed/'),
-    ]
-
-    def print_version(self, url):
-        if 'marathi' in url or 'hindi' in url or 'bangla' in url:
-            return ''
-        return url
+    feeds = []
+
+    when = '170'  # hours > 7 days
+    index = 'https://www.orfonline.org'
+
+    sections = [
+        'expert-speak', 'books', 'event-reports', 'events', 'forums', 'gp-orf-series', 'issue-brief', 'monitors',
+        'occasional-paper', 'primer', 'series', 'surveys-polls', 'young-voices', 'research'
+    ]
+    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'
+    for sec in sections:
+        sec_index = index + '/' + sec + '/'
+        feeds.append((sec.capitalize(), a.format(when, quote(sec_index, safe=''))))
+    feeds.append(('Others', a.format(when, quote(index, safe=''))))
+
+    def populate_article_metadata(self, article, soup, first):
+        article.url = self.art_url
+        article.title = article.title.replace(' - Observer Research Foundation', '')
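All three rewritten recipes share the same obfuscated-article round-trip: get_obfuscated_article() follows the Google News entry to the publisher page and stashes the real URL in self.art_url, and populate_article_metadata() later writes it back so the finished book links to the publisher instead of news.google.com. A toy model of that hand-off (stub classes and a hypothetical URL, not the calibre API; simultaneous_downloads = 1 in these recipes is presumably what keeps the single shared attribute race-free, since articles are then fetched one at a time):

    # Stub of the art_url hand-off; illustrative only, not the calibre classes.
    class Article:
        url = ''
        title = 'Some piece - Observer Research Foundation'

    class RecipeStub:
        art_url = ''  # shared per-instance state, safe with serial downloads

        def get_obfuscated_article(self, google_news_url):
            link = 'https://www.orfonline.org/expert-speak/some-piece/'  # hypothetical resolved target
            self.art_url = link
            return {'data': b'<html>...</html>', 'url': link}

        def populate_article_metadata(self, article):
            article.url = self.art_url  # replace the Google News URL
            article.title = article.title.replace(' - Observer Research Foundation', '')

    r = RecipeStub()
    r.get_obfuscated_article('https://news.google.com/rss/articles/...')
    art = Article()
    r.populate_article_metadata(art)
    print(art.url, '|', art.title)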