Mirror of https://github.com/kovidgoyal/calibre.git
Update The Hindu

parent 524f70ccec
commit 423fbbed4a

@@ -1,141 +1,99 @@
-#!/usr/bin/env python
-# vim:fileencoding=utf-8
-# License: GPLv3 Copyright: 2009, Kovid Goyal <kovid at kovidgoyal.net>
-
-import string
+import json
+import re
+from collections import defaultdict
+from datetime import date
+from calibre.web.feeds.news import BasicNewsRecipe, classes

-from calibre import entity_to_unicode
-from calibre.web.feeds.news import BasicNewsRecipe


-def classes(classes):
-    q = frozenset(classes.split(' '))
-    return dict(
-        attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)})
+def absurl(url):
+    if url.startswith('/'):
+        url = 'https://www.thehindu.com' + url
+    return url


+local_edition = None
+# Chennai is default edition, for other editions use 'th_hyderabad', 'th_bangalore', 'th_delhi', 'th_kolkata' etc


 class TheHindu(BasicNewsRecipe):
-    title = u'The Hindu'
+    title = 'The Hindu'
+    __author__ = 'unkn0wn'
     language = 'en_IN'
-    epaper_url = 'https://epaper.thehindu.com'

-    oldest_article = 1
-    __author__ = 'Kovid Goyal'
-    max_articles_per_feed = 100
     no_stylesheets = True
+    masthead_url = 'https://www.thehindu.com/theme/images/th-online/thehindu-logo.svg'
     remove_attributes = ['style', 'height', 'width']
-    extra_css = '.lead-img-cont { text-align: center; } ' \
-        '.lead-img-caption { font-size: small; font-style: italic; } ' \
-        '.mobile-author-cont { font-size: small; text-transform: uppercase; } ' \
-        '.intro ~ .intro, .update-time, .ksl-time-stamp * { display: none; } '
+    extra_css = '.caption{font-size:small; text-align:center;}'\
+        '.author{font-size:small;}'
+    ignore_duplicate_articles = {'url'}

-    ignore_duplicate_articles = {'title', 'url'}
     keep_only_tags = [
-        dict(name='h1', attrs={'class': ['title', 'special-article-heading']}),
-        classes('lead-img-cont mobile-author-cont photo-collage intro'),
-        dict(id=lambda x: x and x.startswith('content-body-')),
+        classes('article-section ')
     ]

-    def get_browser(self):
-        br = BasicNewsRecipe.get_browser(self, user_agent='common_words/based')
-        br.addheaders += [('Referer', self.epaper_url)]  # needed for fetching cover
-        # br.set_debug_http(True)
-        return br
+    remove_tags = [
+        classes('hide-mobile comments-shares share-page editiondetails')
+    ]

-    def get_cover_url(self):
-        url = self.index_to_soup(self.epaper_url + '/Login/DefaultImage', raw=True)
-        return url.replace(br'\\', b'/').decode('utf-8')[1:-1]

     def preprocess_html(self, soup):
-        img = soup.find('img', attrs={'class': 'lead-img'})
-        try:
-            for i, source in enumerate(tuple(img.parent.findAll('source', srcset=True))):
-                if i == 0:
-                    img['src'] = source['srcset'].split()[0]
-                source.extract()
-        except Exception:
-            pass
-        # for img in soup.findAll(attrs={'data-original': True}):
-        #     img['src'] = img['data-original']
-        # Place intro beneath the title, skip duplicates
-        try:
-            soup.h1.insert_after(soup.find('h2', attrs={'class': 'intro'}))
-        except Exception:
-            pass
-        # Remove ',' from location tag
-        ts = soup.find('span', attrs={'class': 'ksl-time-stamp'})
-        if ts and ts.string:
-            ts.string = ts.string.split(',')[0]
+        for img in soup.findAll('img', attrs={'data-original':True}):
+            img['src'] = img['data-original']
         return soup

-    def populate_article_metadata(self, article, soup, first):
+    def get_cover_url(self):
+        cover = 'https://img.kiosko.net/' + str(
+            date.today().year
+        ) + '/' + date.today().strftime('%m') + '/' + date.today(
+        ).strftime('%d') + '/in/hindu.750.jpg'
+        br = BasicNewsRecipe.get_browser(self)
         try:
-            desc = soup.find('meta', attrs={'name': 'description'}).get('content')
-            if not desc.startswith('Todays paper'):
-                desc += '...' if len(desc) >= 199 else ''  # indicate truncation
-                article.text_summary = article.summary = entity_to_unicode(desc)
-        except AttributeError:
-            return
-
-    def articles_from_soup(self, soup):
-        ans = []
-        div = soup.find('section', attrs={'id': 'section_1'})
-        if div is None:
-            return ans
-        for ul in div.findAll('ul', attrs={'class': 'archive-list'}):
-            for x in ul.findAll(['a']):
-                title = self.tag_to_string(x)
-                url = x.get('href', False)
-                if not url or not title:
-                    continue
-                self.log('\t\tFound article:', title)
-                self.log('\t\t\t', url)
-                ans.append({
-                    'title': title,
-                    'url': url,
-                    'description': '',
-                    'date': ''})
-        return ans
+            br.open(cover)
+        except:
+            index = 'https://en.kiosko.net/in/np/hindu.html'
+            soup = self.index_to_soup(index)
+            for image in soup.findAll('img', src=True):
+                if image['src'].endswith('750.jpg'):
+                    return image['src']
+            self.log("\nCover unavailable")
+            cover = None
+        return cover

     def parse_index(self):
-        # return [('xxx', [
-        #     {'title':'xxx', 'url':'http://www.thehindu.com/opinion/op-ed/rohingya-bangladeshs-burden-to-bear/article19694058.ece'},
-        #     {'title':'yyy', 'url':'http://www.thehindu.com/sci-tech/energy-and-environment/on-river-washed-antique-plains/article19699327.ece'}
-        # ])]
-        soup = self.index_to_soup('https://www.thehindu.com/todays-paper/')
-        nav_div = soup.find(id='subnav-tpbar-latest')
-        section_list = []
+        if local_edition:
+            yr = str(date.today().year)
+            mn = date.today().strftime('%m')
+            dy = date.today().strftime('%d')
+            url = 'https://www.thehindu.com/todays-paper/' + yr + '-' + mn + '-' + dy + '/' + local_edition + '/'
+        else:
+            url = 'https://www.thehindu.com/todays-paper/'
+        raw = self.index_to_soup(url, raw=True)
+        soup = self.index_to_soup(raw)
+        ans = self.hindu_parse_index(soup)
+        if not ans:
+            raise ValueError(
+                'The Hindu Newspaper is not published Today.'
+            )
+        return ans

-        # Finding all the section titles that are acceptable
-        for x in nav_div.findAll(['a']):
-            if self.is_accepted_entry(x):
-                section_list.append(
-                    (string.capwords(self.tag_to_string(x)), x['href']))
-        feeds = []
+    def hindu_parse_index(self, soup):
+        for script in soup.findAll('script'):
+            if not self.tag_to_string(script).strip().startswith('let grouped_articles = {}'):
+                continue
+            if script is not None:
+                art = re.search(r'grouped_articles = ({\"[^<]+?]})', self.tag_to_string(script))
+                data = json.loads(art.group(1))

-        # For each section title, fetch the article urls
-        for section in section_list:
-            section_title = section[0]
-            section_url = section[1]
-            self.log('Found section:', section_title, section_url)
-            soup = self.index_to_soup(section_url)
-            articles = self.articles_from_soup(soup)
-            if articles:
-                feeds.append((section_title, articles))
+                feeds_dict = defaultdict(list)

-        return feeds
-
-    def is_accepted_entry(self, entry):
-        # Those sections in the top nav bar that we will omit
-        omit_list = [
-            'tp-tamilnadu', 'tp-karnataka', 'tp-kerala', 'tp-andhrapradesh',
-            'tp-telangana', 'tp-newdelhi', 'tp-mumbai', 'tp-otherstates',
-            'tp-in-school', 'tp-metroplus', 'tp-youngworld', 'tp-fridayreview',
-            'tp-downtown', 'tp-bookreview', 'tp-others']
-
-        is_accepted = True
-        for omit_entry in omit_list:
-            if entry['href'][0:-1].endswith(omit_entry):
-                is_accepted = False
-                break
-        return is_accepted
+                a = json.dumps(data)
+                for sec in json.loads(a):
+                    for item in data[sec]:
+                        section = sec.replace('TH_', '')
+                        title = item['articleheadline']
+                        url = absurl(item['href'])
+                        desc = 'from page no.' + item['pageno'] + ' | ' + item['teaser_text'] or ''
+                        self.log('\t', title, '\n\t\t', url)
+                        feeds_dict[section].append({"title": title, "url": url, "description": desc})
+                return [(section, articles) for section, articles in feeds_dict.items()]
+            else:
+                return []