This commit is contained in:
Kovid Goyal 2024-02-13 12:21:54 +05:30
commit 556140dd10
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
2 changed files with 19 additions and 15 deletions

View File

@@ -37,7 +37,7 @@ class Politico(BasicNewsRecipe):
     ]
     remove_tags = [
-        dict(name=['notags', 'embed', 'aside', 'object', 'link', 'img', 'figure']),
+        dict(name=['notags', 'embed', 'aside', 'object', 'link', 'img', 'figure', 'svg', 'button']),
         dict(
             attrs={'class': lambda x: x and 'story-tools' in x.split()}),
         dict(

View File

@@ -9,38 +9,42 @@ class SwarajyaMag(BasicNewsRecipe):
     no_stylesheets = True
     remove_javascript = True
     use_embedded_content = False
-    remove_attributes = ['height', 'width']
+    remove_attributes = ['height', 'width', 'style']
     encoding = 'utf-8'
     keep_only_tags = [
-        classes('_2PqtR _1sMRD ntw8h author-bio'),
+        dict(name='article')
     ]
     remove_tags = [
-        classes('_JscD _2r17a'),
+        dict(name=['svg', 'button', 'source']),
+        classes('swarajya_patron_block hs-tooltip-content hidden'),
     ]

     def preprocess_html(self, soup):
-        for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src'].split('?')[0]
+        for span in soup.findAll('span'):
+            if self.tag_to_string(span).strip() == 'Tags':
+                div = span.findParent('div')
+                if div:
+                    div.extract()
         return soup

     def parse_index(self):
         soup = self.index_to_soup('https://swarajyamag.com/all-issues')
-        a = soup.find('a', href=lambda x: x and x.startswith('/issue/'))
+        a = soup.find('a', href=lambda x: x and x.startswith('https://swarajyamag.com/issue/'))
         url = a['href']
         self.log('Downloading issue:', url)
-        self.cover_url = a.find('img', attrs={'data-src': True})['data-src']
-        soup = self.index_to_soup('https://swarajyamag.com' + url)
+        self.cover_url = a.img['src']
+        soup = self.index_to_soup(url)
         ans = []
-        for a in soup.findAll(**classes('_2eOQr')):
-            url = a['href']
+        for div in soup.findAll('div', attrs={'class':'rounded'}):
+            url = div.findParent('a')['href']
             if url.startswith('/'):
                 url = 'https://swarajyamag.com' + url
-            title = self.tag_to_string(a)
-            d = a.find_previous_sibling('a', **classes('_2nEd_'))
-            if d:
-                desc = 'By ' + self.tag_to_string(d).strip()
+            h4 = div.find('h4')
+            title = self.tag_to_string(h4)
+            d = h4.next_sibling
+            desc = 'By ' + self.tag_to_string(d).strip()
             self.log(title, ' at ', url, '\n', desc)
             ans.append({'title': title, 'url': url, 'description': desc})