Merge branch 'master' of https://github.com/unkn0w7n/calibre
commit 2e4453f5d4
recipes/livelaw.recipe
@@ -1,12 +1,5 @@
 from calibre.web.feeds.news import BasicNewsRecipe, classes
-from datetime import datetime, timezone, timedelta
-from calibre.utils.date import parse_date
-
-
-def absurl(x):
-    if x.startswith('/'):
-        x = 'https://www.livelaw.in' + x
-    return x
+from urllib.parse import quote
 
 
 class livelaw(BasicNewsRecipe):
@@ -22,81 +15,64 @@ class livelaw(BasicNewsRecipe):
     language = 'en_IN'
     remove_attributes = ['height', 'width', 'style']
     masthead_url = 'https://www.livelaw.in/images/logo.png'
-    oldest_article = 2
-    max_articles_per_feed = 20
     remove_empty_feeds = True
+    remove_javascript = True
     ignore_duplicate_articles = {'title', 'url'}
-    extra_css = '[data-datestring]{font-size:smaller;}'
+    extra_css = '''
+        .news_detail_person_detail {font-size:small; color:#202020;}
+        .news-description { color:#202020; font-style:italic; }
+    '''
 
+    articles_are_obfuscated = True
+
+    def get_obfuscated_article(self, url):
+        br = self.get_browser()
+        soup = self.index_to_soup(url)
+        link = soup.a['href']
+        skip_sections =[ # add sections you want to skip
+            '/video/', '/videos/', '/multimedia/',
+        ]
+        if any(x in link for x in skip_sections):
+            self.abort_article('skipping video links ', link)
+        self.log('Found ', link)
+        html = br.open(link).read()
+        return ({ 'data': html, 'url': link })
+
     keep_only_tags = [
-        classes(
-            'trending_heading author-on-detail details-date-time detail_img_cover details-content-story'
-        )
+        dict(name='div', attrs={'id':'page-content-wrapper'})
     ]
 
     remove_tags = [
-        classes('in-image-ad-wrap'),
-        dict(
-            name='div',
-            attrs={'id': lambda x: x and x.startswith('inside_post_content_ad')}
+        classes(
+            'in-image-ad-wrap news_details_social_media_icons news_details_social_icon_desktop '
+            'audioSection news_details_tags_row nextpage'
         ),
-        dict(name='div', attrs={'id': lambda x: x and x.startswith('filler_ad')})
+        dict(attrs={'class':lambda x: x and 'inside-post-ad' in x}),
+        dict(attrs={'id':[
+            'news_buzz_updates', 'after_tags', 'comments_before', 'comments', 'comments_after'
+        ]})
     ]
 
-    def articles_from_soup(self, soup):
-        ans = []
-        div = soup.find('div', **classes('news_listing_section_mixin'))
-        for h2 in div.findAll('h2', **classes('text_heading')):
-            a = h2.find('a', href=True)
-            title = self.tag_to_string(a)
-            url = absurl(a['href'])
-            d = h2.find_next_sibling('div')
-            date = parse_date(
-                self.tag_to_string(d).replace(' AM GMT', ':00 +0530'
-                ).replace(' PM GMT', ':00 +0530')
-            )
-            today = (datetime.now(timezone.utc)).replace(microsecond=0)
-            if (today - date) > timedelta(self.oldest_article):
-                url = ''
-
-            if not url or not title:
-                continue
-
-            self.log('\t', title)
-            self.log('\t\t', url)
-            ans.append({'title': title, 'url': url})
-        return ans
+    def preprocess_html(self, soup):
+        for img in soup.findAll('img', attrs={'data-src':True}):
+            img['src'] = img['data-src']
+        for h2 in soup.findAll(['h2', 'h6']):
+            h2.name = 'p'
+        return soup
 
     def parse_index(self):
-        soup = self.index_to_soup('https://www.livelaw.in')
-        nav_div = soup.find('div', **classes('navbar_center'))
-        section_list = []
-
-        # Finding all the section titles that are acceptable
-        for a in nav_div.findAll(['a']):
-            if self.is_accepted_entry(a):
-                section_list.append((self.tag_to_string(a), absurl(a['href'])))
         feeds = []
 
-        # For each section title, fetch the article urls
-        for section in section_list:
-            section_title = section[0]
-            section_url = section[1]
-            self.log(section_title, section_url)
-            soup = self.index_to_soup(section_url)
-            articles = self.articles_from_soup(soup)
-            if articles:
-                feeds.append((section_title, articles))
+        when = '27' # hours
+        index = 'https://www.livelaw.in/'
+        sections = [
+            'top-stories', 'supreme-court', 'high-court', 'news-updates', 'consumer-cases', 'articles',
+            'lawschool', 'law-firms', 'round-ups'
+        ]
+
+        a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'
+
+        for sec in sections:
+            feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
+        feeds.append(('Others' , a.format(when, quote(index + sec, safe=''))))
         return feeds
-
-    def is_accepted_entry(self, entry):
-        # Those sections in the top nav bar that we will omit
-        omit_list = [
-            'videos', 'job-updates', 'events-corner', 'sponsored', 'hindi.livelaw.in', 'javascript:void(0);',
-        ]
-        is_accepted = True
-        for omit_entry in omit_list:
-            if entry['href'].endswith(omit_entry):
-                is_accepted = False
-                break
-        return is_accepted
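
Note on the livelaw.recipe change above: instead of scraping the site's nav bar and section pages, parse_index() now emits one Google News RSS query per hard-coded section, and, since the RSS items point at news.google.com redirect pages, articles_are_obfuscated/get_obfuscated_article() follows the first <a href> on that page and hands the fetched HTML back to the download pipeline. A minimal sketch of how one feed URL is assembled (the template, "when" window and section names are taken from the diff; the printed URL is only illustrative):

from urllib.parse import quote

# Template and values as they appear in the new parse_index()
a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'
when = '27'  # look-back window, in hours
index = 'https://www.livelaw.in/'
sec = 'top-stories'

# quote(..., safe='') percent-encodes every character, including ':' and '/',
# so the section URL survives as a single allinurl: token in the query string
url = a.format(when, quote(index + sec, safe=''))
print(url)
# https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fwww.livelaw.in%2Ftop-stories&hl=en-IN&gl=IN&ceid=IN:en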
recipes/live_mint.recipe
@@ -20,7 +20,7 @@ class LiveMint(BasicNewsRecipe):
 
     remove_empty_feeds = True
     resolve_internal_links = True
 
     def __init__(self, *args, **kwargs):
         BasicNewsRecipe.__init__(self, *args, **kwargs)
         if self.output_profile.short_name.startswith('kindle'):
@@ -28,13 +28,18 @@ class LiveMint(BasicNewsRecipe):
         if is_saturday:
             self.title = 'Mint Lounge | ' + date.today().strftime('%b %d, %Y')
 
+    def get_cover_url(self):
+        today = date.today().strftime('%d/%m/%Y')
+        today = today.replace('/', '%2F')
+        raw = self.index_to_soup(
+            'https://epaper.livemint.com/Home/GetAllpages?editionid=1&editiondate=' + today, raw=True
+        )
+        for cov in json.loads(raw):
+            if cov['NewsProPageTitle'].lower().startswith(('front', 'cover')):
+                return cov['HighResolution']
+
     if is_saturday:
 
-        def get_cover_url(self):
-            soup = self.index_to_soup('https://lifestyle.livemint.com/')
-            if citem := soup.find('div', attrs={'class':'headLatestIss_cover'}):
-                return citem.img['src'].replace('_tn.jpg', '_mr.jpg')
-
         masthead_url = 'https://lifestyle.livemint.com/mintlounge/static-images/lounge-logo.svg'
 
         oldest_article = 6.5 # days
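
Note on the cover change: both editions now take the cover from the Mint e-paper's GetAllpages listing instead of scraping lifestyle.livemint.com (Saturday) or magzter.com (weekdays; see the removal in the next hunk). A standalone sketch of the same lookup; the endpoint and the NewsProPageTitle/HighResolution keys are copied from the diff, while the plain-GET access and the list-shaped JSON response are assumptions:

import json
from datetime import date
from urllib.request import urlopen

def mint_cover_url():
    # %2F-encode the date so it survives as a single query-parameter value
    today = date.today().strftime('%d/%m/%Y').replace('/', '%2F')
    url = ('https://epaper.livemint.com/Home/GetAllpages'
           '?editionid=1&editiondate=' + today)
    with urlopen(url) as f:  # assumption: the endpoint answers a plain GET
        pages = json.loads(f.read())
    # pick the page whose title marks it as the front page / cover
    for cov in pages:
        if cov['NewsProPageTitle'].lower().startswith(('front', 'cover')):
            return cov['HighResolution']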
@@ -74,13 +79,6 @@ class LiveMint(BasicNewsRecipe):
                 img['src'] = img['data-img']
             return soup
     else:
 
-        def get_cover_url(self):
-            soup = self.index_to_soup(
-                'https://www.magzter.com/IN/HT-Digital-Streams-Ltd./Mint-Mumbai/Newspaper/'
-            )
-            for citem in soup.findAll('meta', content=lambda s: s and s.endswith('view/3.jpg')):
-                return citem['content']
-
         extra_css = '''
             img {margin:0 auto;}
@@ -103,7 +101,7 @@ class LiveMint(BasicNewsRecipe):
         dict(name=['meta', 'link', 'svg', 'button', 'iframe']),
         classes(
             'trendingSimilarHeight moreNews mobAppDownload label msgError msgOk taboolaHeight gadgetSlider'
-            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo manualbacklink'
+            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo'
             ' datePublish sepStory premiumSlider moreStory Joinus moreAbout milestone benefitText'
         )
     ]
recipes/politico_eu.recipe
@@ -14,7 +14,7 @@ class Politico(BasicNewsRecipe):
     title = 'Politico.eu'
     __author__ = 'unkn0wn, Darko Miletic and Sujata Raman'
     description = ('We connect and empower professionals through nonpartisan journalism and actionable'
-                   'intelligence about European politics and policy. Download Weekly.')
+                   ' intelligence about European politics and policy. Download Weekly.')
     publisher = 'Axel Springer SE.'
     category = 'news, politics, Europe'
     oldest_article = 7 # days
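
The one-character politico_eu.recipe fix works because Python joins adjacent string literals with no separator, so the description previously read '...actionableintelligence...'. For example:

# Adjacent string literals are concatenated verbatim at compile time
old = ('We connect and empower professionals through nonpartisan journalism and actionable'
       'intelligence about European politics and policy. Download Weekly.')
new = ('We connect and empower professionals through nonpartisan journalism and actionable'
       ' intelligence about European politics and policy. Download Weekly.')
print('actionableintelligence' in old)   # True: the words fused together
print('actionable intelligence' in new)  # True: the space is restored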