diff --git a/recipes/foreign_policy.recipe b/recipes/foreign_policy.recipe
index 42806fe841..8a2bccc452 100644
--- a/recipes/foreign_policy.recipe
+++ b/recipes/foreign_policy.recipe
@@ -38,8 +38,8 @@ class ForeignPolicy(BasicNewsRecipe):
 
     def parse_index(self):
         soup = self.index_to_soup('https://foreignpolicy.com/the-magazine')
-        img = soup.find('img', src=True, attrs={'alt': lambda x: x and '-cover' in x})
-        self.cover_url = img['src']
+        img = soup.find('img', attrs={'data-lazy-src': lambda x: x and '-cover' in x})
+        self.cover_url = img['data-lazy-src']
         current_section = None
         amap = OrderedDict()
         for x in soup.findAll(name=('h2', 'h3')):
diff --git a/recipes/foreignaffairs.recipe b/recipes/foreignaffairs.recipe
index d2c533ba8a..4154ef392f 100644
--- a/recipes/foreignaffairs.recipe
+++ b/recipes/foreignaffairs.recipe
@@ -141,6 +141,8 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
 
     conversion_options = {'comments': description, 'tags': category,
                          'language': 'en', 'publisher': publisher}
+    ignore_duplicate_articles = {'title', 'url'}
+    remove_empty_feeds = True
 
     def parse_index(self):
         soup = self.index_to_soup(self.INDEX)
diff --git a/recipes/nautilus.recipe b/recipes/nautilus.recipe
index 4ccdbc0558..fa35a36b2a 100644
--- a/recipes/nautilus.recipe
+++ b/recipes/nautilus.recipe
@@ -58,9 +58,7 @@ class Nautilus(BasicNewsRecipe):
         soup = self.index_to_soup('https://www.presspassnow.com/nautilus/issues/')
         div = soup.find('div', **classes('image-fade_in_back'))
         if div:
-            self.cover_url = div.find('img',
-                                      attrs={'srcset': True
-                                             })['srcset'].split(',')[-1].split()[0]
+            self.cover_url = div.find('img', src=True)['src']
         return getattr(self, 'cover_url', self.cover_url)
 
     def preprocess_html(self, soup):
diff --git a/recipes/swarajya.recipe b/recipes/swarajya.recipe
index f065f8866d..fd81f39985 100644
--- a/recipes/swarajya.recipe
+++ b/recipes/swarajya.recipe
@@ -39,6 +39,9 @@ class SwarajyaMag(BasicNewsRecipe):
             if url.startswith('/'):
                 url = 'https://swarajyamag.com' + url
             title = self.tag_to_string(a)
-            self.log(title, ' at ', url)
-            ans.append({'title': title, 'url': url})
+            d = a.find_previous_sibling('a', **classes('_2nEd_'))
+            if d:
+                desc = 'By ' + self.tag_to_string(d).strip()
+            self.log(title, ' at ', url, '\n', desc)
+            ans.append({'title': title, 'url': url, 'description': desc})
         return [('Articles', ans)]