From a5810a362eb63b2f79bbe0b50128101e2109f4ee Mon Sep 17 00:00:00 2001
From: unkn0w7n <51942695+unkn0w7n@users.noreply.github.com>
Date: Sun, 14 Jul 2024 14:17:58 +0530
Subject: [PATCH] ...

---
 recipes/bloomberg.recipe | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/recipes/bloomberg.recipe b/recipes/bloomberg.recipe
index 3ccef3a289..a52d0be669 100644
--- a/recipes/bloomberg.recipe
+++ b/recipes/bloomberg.recipe
@@ -73,31 +73,32 @@ class Bloomberg(BasicNewsRecipe):
     def parse_index(self):
         inx = 'https://cdn-mobapi.bloomberg.com'
         sec = self.index_to_soup(inx + '/wssmobile/v1/navigation/bloomberg_app/search-v2', raw=True)
-        sec_data = json.loads(sec)['searchNav'][0]['items']
+        sec_data = json.loads(sec)['searchNav']
 
         feeds = []
 
-        for sects in sec_data:
-            section = sects['title']
-            sec_slug = sects['links']['self']['href']
-            self.log(section)
+        for i in sec_data:
+            for sects in i['items']:
+                section = sects['title']
+                sec_slug = sects['links']['self']['href']
+                self.log(section)
 
-            articles = []
+                articles = []
 
-            art_soup = self.index_to_soup(inx + sec_slug, raw=True)
-            for arts in json.loads(art_soup)['modules']:
-                if arts['stories']:
-                    for x in arts['stories']:
-                        if x.get('type', '') == 'article':
-                            dt = datetime.fromtimestamp(x['published'] + time.timezone)
-                            if (datetime.now() - dt) > timedelta(self.oldest_article):
-                                continue
-                            title = x['title']
-                            desc = x['autoGeneratedSummary']
-                            url = inx + '/wssmobile/v1/stories/' + x['internalID']
-                            self.log(' ', title, '\n\t', desc)
-                            articles.append({'title': title, 'description':desc, 'url': url})
-            feeds.append((section, articles))
+                art_soup = self.index_to_soup(inx + sec_slug, raw=True)
+                for arts in json.loads(art_soup)['modules']:
+                    if arts['stories']:
+                        for x in arts['stories']:
+                            if x.get('type', '') == 'article':
+                                dt = datetime.fromtimestamp(x['published'] + time.timezone)
+                                if (datetime.now() - dt) > timedelta(self.oldest_article):
+                                    continue
+                                title = x['title']
+                                desc = x['autoGeneratedSummary']
+                                url = inx + '/wssmobile/v1/stories/' + x['internalID']
+                                self.log(' ', title, '\n\t', desc)
+                                articles.append({'title': title, 'description':desc, 'url': url})
+                feeds.append((section, articles))
         return feeds
 
     def preprocess_raw_html(self, raw, *a):
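
A minimal, self-contained sketch of the nested traversal used in the hunk above, assuming the 'searchNav' payload is a list of navigation groups that each carry an 'items' list (the old code read only the first group). The sample payload below is hypothetical and only mirrors the fields the recipe touches; the real API response contains far more data.

    import json

    # Hypothetical payload shaped like the fields the recipe reads:
    # searchNav -> group['items'] -> item['title'] / item['links']['self']['href']
    sample = json.dumps({
        'searchNav': [
            {'items': [
                {'title': 'Markets', 'links': {'self': {'href': '/wssmobile/v1/navigation/markets'}}},
                {'title': 'Technology', 'links': {'self': {'href': '/wssmobile/v1/navigation/technology'}}},
            ]},
            {'items': [
                {'title': 'Politics', 'links': {'self': {'href': '/wssmobile/v1/navigation/politics'}}},
            ]},
        ]
    })

    # Same shape of loop as the patched parse_index: walk every group,
    # then every section inside it, instead of only searchNav[0]['items'].
    sec_data = json.loads(sample)['searchNav']
    for group in sec_data:
        for sect in group['items']:
            print(sect['title'], sect['links']['self']['href'])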