Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit 791857d630 (parent b6812939c9)

Update Business Today
@@ -74,7 +74,7 @@ class BT(BasicNewsRecipe):
 
         # Insert feeds in specified order, if available
 
-        feedSort = ['Editor\'s Note']
+        feedSort = ['Editor\'s Note', 'Editors note']
         for i in feedSort:
             if i in sections:
                 feeds.append((i, sections[i]))
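Both spellings are now looked for, presumably because the section heading scraped from the site can appear either as "Editor's Note" or as "Editors note". A minimal, self-contained sketch of how this ordered lookup behaves (the section data below is hypothetical, not taken from the recipe):

# Hypothetical scraped sections; keys come from the site's section headings.
sections = {'Editors note': ['editorial-url'], 'Markets': ['markets-url']}
feedSort = ['Editor\'s Note', 'Editors note']
feeds = []
for i in feedSort:
    if i in sections:                      # only spellings actually present are picked up
        feeds.append((i, sections[i]))
print(feeds)  # [('Editors note', ['editorial-url'])]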
@@ -82,7 +82,8 @@ class BT(BasicNewsRecipe):
         # Done with the sorted feeds
 
         for i in feedSort:
-            del sections[i]
+            if i in sections:
+                del sections[i]
 
         # Append what is left over...
 
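The second hunk guards the cleanup loop. With two candidate spellings in feedSort, at most one of them will normally be present in sections, so the old unconditional del sections[i] would raise a KeyError for the missing one. A small standalone illustration, again with hypothetical data:

sections = {'Editors note': ['editorial-url'], 'Markets': ['markets-url']}
feedSort = ['Editor\'s Note', 'Editors note']
for i in feedSort:
    if i in sections:        # without this guard, del raises KeyError on "Editor's Note"
        del sections[i]
print(sections)  # {'Markets': ['markets-url']}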
A second diff removes the recipe for 'The Hindu BusinessLine | Print Edition' in its entirety:

@@ -1,94 +0,0 @@
-import json
-import re
-from collections import defaultdict
-from datetime import date
-
-from calibre.web.feeds.news import BasicNewsRecipe, classes
-
-
-def absurl(url):
-    if url.startswith('/'):
-        url = 'https://www.thehindubusinessline.com' + url
-    return url
-
-
-local_edition = None
-# Chennai is default edition, for other editions use 'bl_hyderabad', 'bl_bangalore', 'bl_mumbai'
-
-
-class BusinessLine(BasicNewsRecipe):
-    title = 'The Hindu BusinessLine | Print Edition'
-    __author__ = 'unkn0wn'
-    description = (
-        'The Hindu BusinessLine is known for its credibility, accuracy, in-depth analysis of markets and sober coverage'
-        ' of business news. BusinessLine reduces the daily grind of business to relevant, readable, byte-sized stories.'
-        ' The newspaper is extensively followed by the decision makers and change leaders from the world of business.'
-    )
-    language = 'en_IN'
-    no_stylesheets = True
-    masthead_url = 'https://www.thehindubusinessline.com/theme/images/bl-online/bllogo.png'
-    remove_attributes = ['style', 'height', 'width']
-    extra_css = '.caption{font-size:small; text-align:center;}'\
-        '.author{font-size:small; font-weight:bold;}'\
-        '.subhead, .subhead_lead {font-weight:bold;}'\
-        'img {display:block; margin:0 auto;}'
-
-    ignore_duplicate_articles = {'url'}
-
-    keep_only_tags = [
-        classes('articlepage')
-    ]
-
-    remove_tags = [
-        classes('hide-mobile comments-shares share-page editiondetails author-img')
-    ]
-
-    def preprocess_html(self, soup):
-        for cap in soup.findAll('p', attrs={'class':'caption'}):
-            cap.name = 'figcaption'
-        for img in soup.findAll('img', attrs={'data-original':True}):
-            img['src'] = img['data-original']
-        return soup
-
-    def parse_index(self):
-        dt = date.today().strftime('%Y-%m-%d')
-        # For past editions, set date to, for example, '2023-01-28'
-        # dt = '2023-01-28'
-        if local_edition:
-            url = absurl('/todays-paper/' + dt + '/' + local_edition + '/')
-        else:
-            url = absurl('/todays-paper/' + dt + '/bl_chennai/')
-        raw = self.index_to_soup(url, raw=True)
-        soup = self.index_to_soup(raw)
-        ans = self.hindu_parse_index(soup)
-        if not ans:
-            raise ValueError(
-                'The Hindu BusinessLine Newspaper is not published Today.'
-            )
-        cover = soup.find(attrs={'class':'hindu-ad'})
-        if cover:
-            self.cover_url = cover.img['src']
-        return ans
-
-    def hindu_parse_index(self, soup):
-        for script in soup.findAll('script'):
-            if not self.tag_to_string(script).strip().startswith('let grouped_articles = {}'):
-                continue
-            if script is not None:
-                art = re.search(r'grouped_articles = ({\".*)', self.tag_to_string(script))
-                data = json.JSONDecoder().raw_decode(art.group(1))[0]
-
-                feeds_dict = defaultdict(list)
-
-                a = json.dumps(data)
-                for sec in json.loads(a):
-                    for item in data[sec]:
-                        section = sec.replace('BL_', '')
-                        title = item['articleheadline']
-                        url = absurl(item['href'])
-                        desc = 'Page no.' + item['pageno'] + ' | ' + item['teaser_text'] or ''
-                        self.log('\t', title, '\n\t\t', url)
-                        feeds_dict[section].append({"title": title, "url": url, "description": desc})
-                return [(section, articles) for section, articles in feeds_dict.items()]
-            else:
-                return []
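If the removed BusinessLine recipe is still needed, the code above can be saved locally as a .recipe file and run through calibre's command-line converter, for example ebook-convert businessline.recipe output.epub (the filename here is only an example); whether the site still serves the markup the recipe expects is not guaranteed.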
Binary file not shown (a 1.5 KiB image was also removed).