diff --git a/recipes/economist.recipe b/recipes/economist.recipe
index c251a57ee1..90901c1b28 100644
--- a/recipes/economist.recipe
+++ b/recipes/economist.recipe
@@ -264,20 +264,9 @@ class Economist(BasicNewsRecipe):
         return ans
 
     def economist_parse_index(self, soup):
-        img = None
-        if edition_date:
-            archive_url = "https://www.economist.com/weeklyedition/archive?year={}".format(edition_date[:4])
-            archive = self.index_to_soup(archive_url)
-            q = edition_date.replace('-', '')
-            q = '/print-covers/{}_'.format(q)
-            img = archive.find('img', srcset=lambda x: x and q in x)
-        else:
-            archive = self.index_to_soup("https://www.economist.com/weeklyedition/archive")
-            div = archive.find(attrs={'class': 'edition-teaser__image'})
-            if div is not None:
-                img = div.find('img', srcset=True)
-        if img:
-            self.cover_url = img['srcset'].split(',')[-1].split()[0]
+        if (tag := soup.select_one("script#__NEXT_DATA__")) is not None:
+            data = json.loads(tag.string)
+            self.cover_url = data['props']['pageProps']['content']['image']['main']['url']['canonical']
         self.log('Got cover:', self.cover_url)
         feeds = []
         for section in soup.findAll(**classes('layout-weekly-edition-section')):
diff --git a/recipes/economist_free.recipe b/recipes/economist_free.recipe
index c251a57ee1..90901c1b28 100644
--- a/recipes/economist_free.recipe
+++ b/recipes/economist_free.recipe
@@ -264,20 +264,9 @@ class Economist(BasicNewsRecipe):
         return ans
 
     def economist_parse_index(self, soup):
-        img = None
-        if edition_date:
-            archive_url = "https://www.economist.com/weeklyedition/archive?year={}".format(edition_date[:4])
-            archive = self.index_to_soup(archive_url)
-            q = edition_date.replace('-', '')
-            q = '/print-covers/{}_'.format(q)
-            img = archive.find('img', srcset=lambda x: x and q in x)
-        else:
-            archive = self.index_to_soup("https://www.economist.com/weeklyedition/archive")
-            div = archive.find(attrs={'class': 'edition-teaser__image'})
-            if div is not None:
-                img = div.find('img', srcset=True)
-        if img:
-            self.cover_url = img['srcset'].split(',')[-1].split()[0]
+        if (tag := soup.select_one("script#__NEXT_DATA__")) is not None:
+            data = json.loads(tag.string)
+            self.cover_url = data['props']['pageProps']['content']['image']['main']['url']['canonical']
         self.log('Got cover:', self.cover_url)
         feeds = []
         for section in soup.findAll(**classes('layout-weekly-edition-section')):