diff --git a/recipes/live_law.recipe b/recipes/live_law.recipe
index 58bf6b4943..e40f1d926f 100644
--- a/recipes/live_law.recipe
+++ b/recipes/live_law.recipe
@@ -1,12 +1,5 @@
 from calibre.web.feeds.news import BasicNewsRecipe, classes
-from datetime import datetime, timezone, timedelta
-from calibre.utils.date import parse_date
-
-
-def absurl(x):
-    if x.startswith('/'):
-        x = 'https://www.livelaw.in' + x
-    return x
+from urllib.parse import quote


 class livelaw(BasicNewsRecipe):
@@ -22,81 +15,64 @@ class livelaw(BasicNewsRecipe):
     language = 'en_IN'
     remove_attributes = ['height', 'width', 'style']
     masthead_url = 'https://www.livelaw.in/images/logo.png'
-    oldest_article = 2
-    max_articles_per_feed = 20
     remove_empty_feeds = True
+    remove_javascript = True
     ignore_duplicate_articles = {'title', 'url'}
-    extra_css = '[data-datestring]{font-size:smaller;}'
+
+    extra_css = '''
+        .news_detail_person_detail {font-size:small; color:#202020;}
+        .news-description { color:#202020; font-style:italic; }
+    '''
+
+    articles_are_obfuscated = True
+
+    def get_obfuscated_article(self, url):
+        br = self.get_browser()
+        soup = self.index_to_soup(url)
+        link = soup.a['href']
+        skip_sections = [  # add sections you want to skip
+            '/video/', '/videos/', '/multimedia/',
+        ]
+        if any(x in link for x in skip_sections):
+            self.abort_article('skipping video link: ' + link)
+        self.log('Found', link)
+        html = br.open(link).read()
+        return {'data': html, 'url': link}

     keep_only_tags = [
-        classes(
-            'trending_heading author-on-detail details-date-time detail_img_cover details-content-story'
-        )
+        dict(name='div', attrs={'id': 'page-content-wrapper'})
     ]

     remove_tags = [
-        classes('in-image-ad-wrap'),
-        dict(
-            name='div',
-            attrs={'id': lambda x: x and x.startswith('inside_post_content_ad')}
+        classes(
+            'in-image-ad-wrap news_details_social_media_icons news_details_social_icon_desktop '
+            'audioSection news_details_tags_row nextpage'
         ),
-        dict(name='div', attrs={'id': lambda x: x and x.startswith('filler_ad')})
+        dict(attrs={'class': lambda x: x and 'inside-post-ad' in x}),
+        dict(attrs={'id': [
+            'news_buzz_updates', 'after_tags', 'comments_before', 'comments', 'comments_after'
+        ]})
     ]

-    def articles_from_soup(self, soup):
-        ans = []
-        div = soup.find('div', **classes('news_listing_section_mixin'))
-        for h2 in div.findAll('h2', **classes('text_heading')):
-            a = h2.find('a', href=True)
-            title = self.tag_to_string(a)
-            url = absurl(a['href'])
-            d = h2.find_next_sibling('div')
-            date = parse_date(
-                self.tag_to_string(d).replace(' AM GMT', ':00 +0530'
-                                              ).replace(' PM GMT', ':00 +0530')
-            )
-            today = (datetime.now(timezone.utc)).replace(microsecond=0)
-            if (today - date) > timedelta(self.oldest_article):
-                url = ''
+    def preprocess_html(self, soup):
+        for img in soup.findAll('img', attrs={'data-src': True}):
+            img['src'] = img['data-src']
+        for h2 in soup.findAll(['h2', 'h6']):
+            h2.name = 'p'
+        return soup

-            if not url or not title:
-                continue
+    feeds = []

-            self.log('\t', title)
-            self.log('\t\t', url)
-            ans.append({'title': title, 'url': url})
-        return ans
+    when = '27'  # hours
+    index = 'https://www.livelaw.in/'

-    def parse_index(self):
-        soup = self.index_to_soup('https://www.livelaw.in')
-        nav_div = soup.find('div', **classes('navbar_center'))
-        section_list = []
+    sections = [
+        'top-stories', 'supreme-court', 'high-court', 'news-updates', 'consumer-cases', 'articles',
+        'lawschool', 'law-firms', 'round-ups'
+    ]

-        # Finding all the section titles that are acceptable
-        for a in nav_div.findAll(['a']):
-            if self.is_accepted_entry(a):
-                section_list.append((self.tag_to_string(a), absurl(a['href'])))
-        feeds = []
+    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'

-        # For each section title, fetch the article urls
-        for section in section_list:
-            section_title = section[0]
-            section_url = section[1]
-            self.log(section_title, section_url)
-            soup = self.index_to_soup(section_url)
-            articles = self.articles_from_soup(soup)
-            if articles:
-                feeds.append((section_title, articles))
-        return feeds
-
-    def is_accepted_entry(self, entry):
-        # Those sections in the top nav bar that we will omit
-        omit_list = [
-            'videos', 'job-updates', 'events-corner', 'sponsored', 'hindi.livelaw.in', 'javascript:void(0);',
-        ]
-        is_accepted = True
-        for omit_entry in omit_list:
-            if entry['href'].endswith(omit_entry):
-                is_accepted = False
-                break
-        return is_accepted
+    for sec in sections:
+        feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
+    feeds.append(('Others', a.format(when, quote(index, safe=''))))
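Note: the new class-level loop builds one Google News RSS query per livelaw.in section. As a standalone sketch (plain Python, reusing only the values that appear in the diff above), the generated feed URLs look like this:

    from urllib.parse import quote

    when = '27'  # hours of lookback, as in the recipe
    index = 'https://www.livelaw.in/'
    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'

    for sec in ('top-stories', 'supreme-court'):  # two of the nine sections
        # quote(..., safe='') percent-encodes ':' and '/' so the whole URL
        # survives inside the allinurl: query parameter
        print(a.format(when, quote(index + sec, safe='')))
    # -> https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fwww.livelaw.in%2Ftop-stories&hl=en-IN&gl=IN&ceid=IN:en

Because Google News items point at redirect pages rather than at livelaw.in directly, the recipe also sets articles_are_obfuscated and resolves the real article link (soup.a['href']) in get_obfuscated_article before downloading.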
diff --git a/recipes/livemint.recipe b/recipes/livemint.recipe
index 2c075d8f78..a1ff2cb692 100644
--- a/recipes/livemint.recipe
+++ b/recipes/livemint.recipe
@@ -20,7 +20,7 @@ class LiveMint(BasicNewsRecipe):
     remove_empty_feeds = True
     resolve_internal_links = True
-    
+
     def __init__(self, *args, **kwargs):
         BasicNewsRecipe.__init__(self, *args, **kwargs)
         if self.output_profile.short_name.startswith('kindle'):
@@ -28,13 +28,18 @@ class LiveMint(BasicNewsRecipe):
             if is_saturday:
                 self.title = 'Mint Lounge | ' + date.today().strftime('%b %d, %Y')

+    def get_cover_url(self):
+        today = date.today().strftime('%d/%m/%Y')
+        today = today.replace('/', '%2F')
+        raw = self.index_to_soup(
+            'https://epaper.livemint.com/Home/GetAllpages?editionid=1&editiondate=' + today, raw=True
+        )
+        for cov in json.loads(raw):
+            if cov['NewsProPageTitle'].lower().startswith(('front', 'cover')):
+                return cov['HighResolution']
+
     if is_saturday:
-
-        def get_cover_url(self):
-            soup = self.index_to_soup('https://lifestyle.livemint.com/')
-            if citem := soup.find('div', attrs={'class':'headLatestIss_cover'}):
-                return citem.img['src'].replace('_tn.jpg', '_mr.jpg')
-
+        masthead_url = 'https://lifestyle.livemint.com/mintlounge/static-images/lounge-logo.svg'

         oldest_article = 6.5  # days
@@ -74,13 +79,6 @@ class LiveMint(BasicNewsRecipe):
                     img['src'] = img['data-img']
             return soup
     else:
-
-        def get_cover_url(self):
-            soup = self.index_to_soup(
-                'https://www.magzter.com/IN/HT-Digital-Streams-Ltd./Mint-Mumbai/Newspaper/'
-            )
-            for citem in soup.findAll('meta', content=lambda s: s and s.endswith('view/3.jpg')):
-                return citem['content']

         extra_css = '''
             img {margin:0 auto;}
@@ -103,7 +101,7 @@ class LiveMint(BasicNewsRecipe):
         dict(name=['meta', 'link', 'svg', 'button', 'iframe']),
         classes(
             'trendingSimilarHeight moreNews mobAppDownload label msgError msgOk taboolaHeight gadgetSlider'
-            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo manualbacklink'
+            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo'
             ' datePublish sepStory premiumSlider moreStory Joinus moreAbout milestone benefitText'
         )
     ]
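Note: the new get_cover_url replaces both deleted per-edition implementations (the Lounge page scrape and the Magzter scrape) with a single epaper JSON endpoint. A minimal sketch of the same lookup outside calibre, assuming, as the recipe does, that the endpoint returns a JSON list of pages carrying NewsProPageTitle and HighResolution keys:

    import json
    from datetime import date
    from urllib.request import urlopen

    # '%2F'-escape the date slashes exactly as the recipe's str.replace does
    today = date.today().strftime('%d/%m/%Y').replace('/', '%2F')
    url = 'https://epaper.livemint.com/Home/GetAllpages?editionid=1&editiondate=' + today
    pages = json.loads(urlopen(url).read())
    # first page titled 'Front ...' or 'Cover ...' is taken as the cover
    front = next((p['HighResolution'] for p in pages
                  if p['NewsProPageTitle'].lower().startswith(('front', 'cover'))), None)
    print(front)

Since the method is now defined unconditionally at class level, it serves both the weekday Mint edition and the Saturday Lounge edition, which keeps only its distinct masthead_url inside the if is_saturday: branch.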
diff --git a/recipes/poliitico_eu.recipe b/recipes/poliitico_eu.recipe
index 01d7ba59d4..9f16c40a36 100644
--- a/recipes/poliitico_eu.recipe
+++ b/recipes/poliitico_eu.recipe
@@ -14,7 +14,7 @@ class Politico(BasicNewsRecipe):
     title = 'Politico.eu'
     __author__ = 'unkn0wn, Darko Miletic and Sujata Raman'
     description = ('We connect and empower professionals through nonpartisan journalism and actionable'
-                   'intelligence about European politics and policy. Download Weekly.')
+                   ' intelligence about European politics and policy. Download Weekly.')
     publisher = 'Axel Springer SE.'
     category = 'news, politics, Europe'
     oldest_article = 7  # days
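Note: the one-character change matters because Python fuses adjacent string literals at compile time, so the old description rendered as '...actionableintelligence...'. A two-line illustration:

    s = ('actionable'
         'intelligence')   # -> 'actionableintelligence' (words run together)
    t = ('actionable'
         ' intelligence')  # -> 'actionable intelligence'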