calibre/recipes/hamilton_spectator.recipe

'''
Hamilton Spectator Calibre Recipe
'''

from calibre.ptempfile import PersistentTemporaryFile
from calibre.web.feeds.news import BasicNewsRecipe, classes

class HamiltonSpectator(BasicNewsRecipe):
    title = u'Hamilton Spectator'
    max_articles_per_feed = 50
    __author__ = u'unkn0wn'
    publisher = u'thespec.com'
    description = u'Ontario Canada Newspaper'
    category = u'News, Ontario, Canada'
    remove_javascript = True
    use_embedded_content = False
    no_stylesheets = True
    language = 'en_CA'
    encoding = 'utf-8'
    remove_attributes = ['style', 'height', 'width']
    masthead_url = 'https://bloximages.chicago2.vip.townnews.com/thespec.com/content/tncms/custom/image/c0094646-1108-11ee-8af0-b3954ce40e5e.png'
    ignore_duplicate_articles = {'title'}
    articles_are_obfuscated = True

    extra_css = '''
        .caption { font-size:small; text-align:center; }
        .authorList, .endnote_contrib { font-size:small; }
    '''
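
    # keep_only_tags and remove_tags below use calibre's classes() helper
    # (imported above), which matches any element whose class attribute
    # contains one of the space-separated class names.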
    keep_only_tags = [
        classes(
            'headline asset-summary authorList articleMainArt asset-body'
        )
    ]

    remove_tags = [
        dict(name=['svg', 'button']),
        dict(attrs={'id': ['tncms-region-article_instory_top', 'tncms-region-article_bottom', 'asset-video-primary']}),
        classes(
            'tnt-blurred-image share-container subscriber-offers access-offers-in-page '
            'access-offers-wrapper tnt-ads-container adLabelWrapperManual shareIcons '
            'articleFeedbackCTA comments-container card-image'
        )
    ]
    def preprocess_html(self, soup):
        # Swap each lazy-loaded image's src for the 640px-wide candidate in data-srcset.
        for img in soup.findAll('img', attrs={'data-srcset': True}):
            for x in img['data-srcset'].split(','):
                if '640w' in x.split():
                    img['src'] = x.split()[0]
        return soup
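
    # The feeds point at Google News RSS results, so each article URL is an
    # intermediate news.google.com page rather than the article itself.
    # With articles_are_obfuscated = True, calibre calls get_obfuscated_article()
    # to resolve the real thespec.com link, skip video/podcast items, and hand
    # back a locally cached copy of the page.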
    def get_obfuscated_article(self, url):
        br = self.get_browser()
        try:
            br.open(url)
        except Exception as e:
            url = e.hdrs.get('location')
        soup = self.index_to_soup(url)
        link = soup.find('a', href=True)
        skip_sections = [  # add sections you want to skip
            '/video/', '/videos/', '/media/', 'podcast'
        ]
        if any(x in link['href'] for x in skip_sections):
            self.log('Aborting Article ', link['href'])
            self.abort_article('skipping video links')
        self.log('Downloading ', link['href'])
        html = br.open(link['href']).read()
        pt = PersistentTemporaryFile('.html')
        pt.write(html)
        pt.close()
        return pt.name
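
    # Feeds are Google News RSS searches restricted to thespec.com URLs from
    # the last 27 hours: one feed per section path, plus a catch-all 'Others' feed.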
    feeds = []
    sections = [
        'news', 'politics', 'opinion', 'business', 'sports', 'life', 'entertainment'
    ]
    a = 'https://news.google.com/rss/search?q=when:27h+allinurl:thespec.com{}&hl=en-CA&gl=CA&ceid=CA:en'
    for sec in sections:
        feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
    feeds.append(('Others', a.format('')))
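

# A quick way to try this recipe locally is calibre's standard recipe test run,
# which fetches only a couple of articles per feed (output file name is up to you):
#   ebook-convert hamilton_spectator.recipe .epub --test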