diff --git a/recipes/andhrajyothy_ap.recipe b/recipes/andhrajyothy_ap.recipe
index fbb43b7b56..68fe0d4e45 100644
--- a/recipes/andhrajyothy_ap.recipe
+++ b/recipes/andhrajyothy_ap.recipe
@@ -51,7 +51,7 @@ class andhra(BasicNewsRecipe):
         self.log('## For your local edition id, modify this recipe to match your edi_id from the cities below\n')
         for edi in edi_data:
             if edi['org_location'] in {'Magazines', 'Navya Daily'}:
-              continue
+                continue
             self.log(edi['org_location'])
             cities = []
             for edi_loc in edi['editionlocation']:
diff --git a/recipes/andhrajyothy_tel.recipe b/recipes/andhrajyothy_tel.recipe
index aba0979455..6d8608ea81 100644
--- a/recipes/andhrajyothy_tel.recipe
+++ b/recipes/andhrajyothy_tel.recipe
@@ -51,7 +51,7 @@ class andhra(BasicNewsRecipe):
         self.log('## For your local edition id, modify this recipe to match your edi_id from the cities below\n')
         for edi in edi_data:
             if edi['org_location'] in {'Magazines', 'Navya Daily'}:
-              continue
+                continue
             self.log(edi['org_location'])
             cities = []
             for edi_loc in edi['editionlocation']:
diff --git a/recipes/economist.recipe b/recipes/economist.recipe
index ddb96b6219..e513af729a 100644
--- a/recipes/economist.recipe
+++ b/recipes/economist.recipe
@@ -339,9 +339,9 @@ class Economist(BasicNewsRecipe):
     def economist_parse_index(self, raw):
         # edition_date = self.recipe_specific_options.get('date')
         # if edition_date and isinstance(edition_date, str):
-        #    data = json.loads(raw)['data']['section']
+        #     data = json.loads(raw)['data']['section']
         # else:
-        #    data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+        #     data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
         dt = dt.strftime('%b %d, %Y')
diff --git a/recipes/economist_free.recipe b/recipes/economist_free.recipe
index ddb96b6219..e513af729a 100644
--- a/recipes/economist_free.recipe
+++ b/recipes/economist_free.recipe
@@ -339,9 +339,9 @@ class Economist(BasicNewsRecipe):
     def economist_parse_index(self, raw):
         # edition_date = self.recipe_specific_options.get('date')
         # if edition_date and isinstance(edition_date, str):
-        #    data = json.loads(raw)['data']['section']
+        #     data = json.loads(raw)['data']['section']
         # else:
-        #    data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+        #     data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
         dt = dt.strftime('%b %d, %Y')
diff --git a/recipes/endgadget.recipe b/recipes/endgadget.recipe
index df10afd02a..bb8a3c00b4 100644
--- a/recipes/endgadget.recipe
+++ b/recipes/endgadget.recipe
@@ -44,27 +44,27 @@ class Engadget(BasicNewsRecipe):
     feeds = [(u'Posts', u'https://www.engadget.com/rss.xml')]

     def parse_feeds(self):
-      # Call parent's method.
-      feeds = BasicNewsRecipe.parse_feeds(self)
-      # Loop through all feeds.
-      for feed in feeds:
-        # Loop through all articles in feed.
-        for article in feed.articles[:]:
-          # Remove articles with '...' in the url.
-          if '/deals/' in article.url:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          # Remove articles with '...' in the title.
-          elif 'best tech deals' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'Podcast' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'The Morning After' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-      return feeds
+        # Call parent's method.
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        # Loop through all feeds.
+        for feed in feeds:
+            # Loop through all articles in feed.
+            for article in feed.articles[:]:
+                # Remove articles with '...' in the url.
+                if '/deals/' in article.url:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                # Remove articles with '...' in the title.
+                elif 'best tech deals' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Podcast' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'The Morning After' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+        return feeds

     def preprocess_html(self, soup):
         for attr in 'data-src data-src-mobile'.split():
diff --git a/recipes/faz_net.recipe b/recipes/faz_net.recipe
index fa4e9c60d2..a87bfd495d 100644
--- a/recipes/faz_net.recipe
+++ b/recipes/faz_net.recipe
@@ -33,7 +33,7 @@ def format_tickaroo_liveblog(soup):
             ntag.insert_after(temp)


-    # process run of images
+# process run of images
 def bilderstrecke(soup,tag):
     flag = False
     try:
@@ -242,7 +242,7 @@ class FazNet(BasicNewsRecipe):

         # format liveblog
         if soup.find(attrs={'class':'tik4-live__container'}):
-          format_tickaroo_liveblog(soup)
+            format_tickaroo_liveblog(soup)

         # remove sizes and calc attributes in images
         for tag in soup.findAll('img'):
diff --git a/recipes/radio_canada.recipe b/recipes/radio_canada.recipe
index 39eb26d7d9..844a19b67d 100644
--- a/recipes/radio_canada.recipe
+++ b/recipes/radio_canada.recipe
@@ -153,15 +153,15 @@ class RadioCanada(BasicNewsRecipe):
     # https://www.mobileread.com/forums/showpost.php?p=1165462&postcount=6
     # Credit goes to user Starson17
     def parse_feeds (self):
-      feeds = BasicNewsRecipe.parse_feeds(self)
-      for feed in feeds:
-        for article in feed.articles[:]:
-          if ('VIDEO' in article.title.upper() or
-              'OHDIO' in article.title.upper() or
-              '/emissions/' in article.url or
-              '/segments/' in article.url or
-              '/entrevue/' in article.url or
-              '/ohdio/' in article.url
-              ):
-            feed.articles.remove(article)
-      return feeds
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        for feed in feeds:
+            for article in feed.articles[:]:
+                if ('VIDEO' in article.title.upper() or
+                    'OHDIO' in article.title.upper() or
+                    '/emissions/' in article.url or
+                    '/segments/' in article.url or
+                    '/entrevue/' in article.url or
+                    '/ohdio/' in article.url
+                   ):
+                    feed.articles.remove(article)
+        return feeds
diff --git a/recipes/rnd.recipe b/recipes/rnd.recipe
index 9140078970..92cea1b3f2 100644
--- a/recipes/rnd.recipe
+++ b/recipes/rnd.recipe
@@ -80,42 +80,41 @@ class RND(BasicNewsRecipe):
     ]

     def parse_feeds(self):
-      # Call parent's method.
-      feeds = BasicNewsRecipe.parse_feeds(self)
-      # Loop through all feeds.
-      for feed in feeds:
-        # Loop through all articles in feed.
-        for article in feed.articles[:]:
-          # Remove articles with '...' in the url.
-          if '/anzeige/' in article.url:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          # Remove articles with '...' in the title.
-          elif 'Liveticker' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'Liveblog' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'Newsblog' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'Podcast' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-
-      return feeds
+        # Call parent's method.
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        # Loop through all feeds.
+        for feed in feeds:
+            # Loop through all articles in feed.
+            for article in feed.articles[:]:
+                # Remove articles with '...' in the url.
+                if '/anzeige/' in article.url:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                # Remove articles with '...' in the title.
+                elif 'Liveticker' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Liveblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Newsblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Podcast' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+        return feeds

     def preprocess_raw_html(self, raw, url):
-      # remove articles requiring login and advertisements
-      unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
-      if unwantedtag in raw:
-        print('Skipping unwanted article with tag:',unwantedtag)
-        self.abort_article('Skipping unwanted article')
+        # remove articles requiring login and advertisements
+        unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
+        if unwantedtag in raw:
+            print('Skipping unwanted article with tag:',unwantedtag)
+            self.abort_article('Skipping unwanted article')

-      unwanted_article_keywords = ['Zum Login']
-      for keyword in unwanted_article_keywords:
-        if keyword in raw:
-          print('Skipping unwanted article with keyword(s):',keyword)
-          #self.abort_article('Skipping unwanted article')
-      return raw
+        unwanted_article_keywords = ['Zum Login']
+        for keyword in unwanted_article_keywords:
+            if keyword in raw:
+                print('Skipping unwanted article with keyword(s):',keyword)
+                #self.abort_article('Skipping unwanted article')
+        return raw
diff --git a/recipes/saechsische.recipe b/recipes/saechsische.recipe
index 577800b500..b7b875e39f 100644
--- a/recipes/saechsische.recipe
+++ b/recipes/saechsische.recipe
@@ -194,42 +194,41 @@ class Saechsische(BasicNewsRecipe):
     ]

     def parse_feeds(self):
-      # Call parent's method.
-      feeds = BasicNewsRecipe.parse_feeds(self)
-      # Loop through all feeds.
-      for feed in feeds:
-        # Loop through all articles in feed.
-        for article in feed.articles[:]:
-          # Remove articles with '...' in the url.
-          if '/anzeige/' in article.url:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          # Remove articles with '...' in the title.
-          elif 'Liveticker' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'Liveblog' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'Newsblog' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-          elif 'Podcast' in article.title:
-            print('Removing:',article.title)
-            feed.articles.remove(article)
-
-      return feeds
+        # Call parent's method.
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        # Loop through all feeds.
+        for feed in feeds:
+            # Loop through all articles in feed.
+            for article in feed.articles[:]:
+                # Remove articles with '...' in the url.
+                if '/anzeige/' in article.url:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                # Remove articles with '...' in the title.
+                elif 'Liveticker' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Liveblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Newsblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Podcast' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+        return feeds

     def preprocess_raw_html(self, raw, url):
-      # remove Newsblogs, articles requiring login and advertisements
-      unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
-      if unwantedtag in raw:
-        print('Skipping unwanted article with tag:',unwantedtag)
-        self.abort_article('Skipping unwanted article')
+        # remove Newsblogs, articles requiring login and advertisements
+        unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
+        if unwantedtag in raw:
+            print('Skipping unwanted article with tag:',unwantedtag)
+            self.abort_article('Skipping unwanted article')

-      unwanted_article_keywords = ['Zum Login']
-      for keyword in unwanted_article_keywords:
-        if keyword in raw:
-          print('Skipping unwanted article with keyword(s):',keyword)
-          #self.abort_article('Skipping unwanted article')
-      return raw
+        unwanted_article_keywords = ['Zum Login']
+        for keyword in unwanted_article_keywords:
+            if keyword in raw:
+                print('Skipping unwanted article with keyword(s):',keyword)
+                #self.abort_article('Skipping unwanted article')
+        return raw
diff --git a/recipes/wsj_free.recipe b/recipes/wsj_free.recipe
index 32c6703d95..8eb2c077d4 100644
--- a/recipes/wsj_free.recipe
+++ b/recipes/wsj_free.recipe
@@ -114,7 +114,7 @@ class WSJ(BasicNewsRecipe):
         for figc in soup.findAll('figcaption'):
             figc['id'] = 'big-top-caption'
         if name:= soup.find('h2', attrs={'itemprop':'name'}):
-          name.extract()
+            name.extract()
         for h2 in soup.findAll('h2'):
             if self.tag_to_string(h2).startswith(('What to Read Next', 'Conversation')):
                 h2.extract()
diff --git a/ruff-strict-pep8.toml b/ruff-strict-pep8.toml
index 922e8b586b..b91df15ec7 100644
--- a/ruff-strict-pep8.toml
+++ b/ruff-strict-pep8.toml
@@ -37,6 +37,7 @@ select = [
     'RUF039', # always use raw-string for regex
     'RUF047', # needless else
     'E302', 'E303', 'E304', 'E305', 'W391', # blank-line standard
+    'E111', 'E112', 'E113', 'E117', # code indentation
 ]

 [lint.per-file-ignores]
diff --git a/src/calibre/ebooks/metadata/opf2.py b/src/calibre/ebooks/metadata/opf2.py
index 7ffe52b44d..b29662a57d 100644
--- a/src/calibre/ebooks/metadata/opf2.py
+++ b/src/calibre/ebooks/metadata/opf2.py
@@ -529,9 +529,9 @@ def serialize_user_metadata(metadata_elem, all_user_metadata, tail='\n'+(' '*8))
     for name, fm in all_user_metadata.items():
         try:
             fm = copy.copy(fm)
-            if (fm.get('datatype', 'text') == 'composite' and
-                    not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
-                    fm['#value#'] = ''
+            if (fm.get('datatype', 'text') == 'composite'
+                    and not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
+                fm['#value#'] = ''
             encode_is_multiple(fm)
             fm = object_to_unicode(fm)
             fm = json.dumps(fm, default=to_json, ensure_ascii=False)
diff --git a/src/calibre/ebooks/metadata/opf3.py b/src/calibre/ebooks/metadata/opf3.py
index 1669932fbe..999757dafa 100644
--- a/src/calibre/ebooks/metadata/opf3.py
+++ b/src/calibre/ebooks/metadata/opf3.py
@@ -951,9 +951,9 @@ def set_user_metadata(root, prefixes, refines, val):
     nval = {}
     for name, fm in val.items():
         fm = fm.copy()
-        if (fm.get('datatype', 'text') == 'composite' and
-                not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
-                fm['#value#'] = ''
+        if (fm.get('datatype', 'text') == 'composite'
+                and not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
+            fm['#value#'] = ''
         encode_is_multiple(fm)
         nval[name] = fm
     set_user_metadata3(root, prefixes, refines, nval)
diff --git a/src/calibre/ebooks/pdf/reflow.py b/src/calibre/ebooks/pdf/reflow.py
index d0763bc76c..4586a0ff74 100644
--- a/src/calibre/ebooks/pdf/reflow.py
+++ b/src/calibre/ebooks/pdf/reflow.py
@@ -1266,17 +1266,13 @@ class Page:
                 absorb_into = None
             else:
                 absorb_into = prev_region
-            if self.regions[next_region].line_count >= \
-               self.regions[prev_region].line_count:
+            if self.regions[next_region].line_count >= self.regions[prev_region].line_count:
                 avg_column_count = sum(len(r.columns) for r in regions)/float(len(regions))
-                if self.regions[next_region].line_count > \
-                   self.regions[prev_region].line_count \
-                   or abs(avg_column_count -
-                          len(self.regions[prev_region].columns)) \
-                   > abs(avg_column_count -
-                         len(self.regions[next_region].columns)):
-                    absorb_into = next_region
-                    absorb_at = 'top'
+                if self.regions[next_region].line_count > self.regions[prev_region].line_count \
+                        or abs(avg_column_count - len(self.regions[prev_region].columns)) \
+                        > abs(avg_column_count - len(self.regions[next_region].columns)):
+                    absorb_into = next_region
+                    absorb_at = 'top'
             if absorb_into is not None:
                 self.regions[absorb_into].absorb_regions(regions, absorb_at)
                 absorbed.update(regions)
@@ -1954,7 +1950,7 @@ class PDFDocument:
         # Do not merge if the next paragraph is indented
         if page.texts:
             if candidate:
-              last_line = candidate.texts[-1]
+                last_line = candidate.texts[-1]
             if candidate \
                 and last_line.bottom > orphan_space \
                 and page.texts[0].indented == 0:
diff --git a/src/calibre/gui2/dialogs/template_general_info.py b/src/calibre/gui2/dialogs/template_general_info.py
index d767c0853b..dba23bd13c 100644
--- a/src/calibre/gui2/dialogs/template_general_info.py
+++ b/src/calibre/gui2/dialogs/template_general_info.py
@@ -33,11 +33,11 @@ class GeneralInformationDialog(Dialog):
         l.addWidget(self.bb)
         html = ''
         if self.include_general_doc:
-            html += '