Mirror of https://github.com/kovidgoyal/calibre.git, synced 2025-07-09 03:04:10 -04:00

commit 8032201318 (parent 8d28380515)

    code indentation (manual)
    ruff 'E111,E112,E113,E117'

The four newly enabled pycodestyle rules are E111 (indentation is not a multiple of four), E112 (expected an indented block), E113 (unexpectedly indented) and E117 (over-indented).
@@ -51,7 +51,7 @@ class andhra(BasicNewsRecipe):
         self.log('## For your local edition id, modify this recipe to match your edi_id from the cities below\n')
         for edi in edi_data:
             if edi['org_location'] in {'Magazines', 'Navya Daily'}:
-                    continue
+                continue
             self.log(edi['org_location'])
             cities = []
             for edi_loc in edi['editionlocation']:

@@ -51,7 +51,7 @@ class andhra(BasicNewsRecipe):
         self.log('## For your local edition id, modify this recipe to match your edi_id from the cities below\n')
         for edi in edi_data:
             if edi['org_location'] in {'Magazines', 'Navya Daily'}:
-                    continue
+                continue
             self.log(edi['org_location'])
             cities = []
             for edi_loc in edi['editionlocation']:

@@ -339,9 +339,9 @@ class Economist(BasicNewsRecipe):
     def economist_parse_index(self, raw):
         # edition_date = self.recipe_specific_options.get('date')
         # if edition_date and isinstance(edition_date, str):
-            # data = json.loads(raw)['data']['section']
+        #     data = json.loads(raw)['data']['section']
         # else:
-            # data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+        #     data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
         dt = dt.strftime('%b %d, %Y')

@@ -339,9 +339,9 @@ class Economist(BasicNewsRecipe):
     def economist_parse_index(self, raw):
         # edition_date = self.recipe_specific_options.get('date')
         # if edition_date and isinstance(edition_date, str):
-            # data = json.loads(raw)['data']['section']
+        #     data = json.loads(raw)['data']['section']
         # else:
-            # data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+        #     data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
         dt = dt.strftime('%b %d, %Y')

@@ -44,27 +44,27 @@ class Engadget(BasicNewsRecipe):
     feeds = [(u'Posts', u'https://www.engadget.com/rss.xml')]
 
     def parse_feeds(self):
-          # Call parent's method.
-          feeds = BasicNewsRecipe.parse_feeds(self)
-          # Loop through all feeds.
-          for feed in feeds:
-              # Loop through all articles in feed.
-              for article in feed.articles[:]:
-                  # Remove articles with '...' in the url.
-                  if '/deals/' in article.url:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  # Remove articles with '...' in the title.
-                  elif 'best tech deals' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'Podcast' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'The Morning After' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-          return feeds
+        # Call parent's method.
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        # Loop through all feeds.
+        for feed in feeds:
+            # Loop through all articles in feed.
+            for article in feed.articles[:]:
+                # Remove articles with '...' in the url.
+                if '/deals/' in article.url:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                # Remove articles with '...' in the title.
+                elif 'best tech deals' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Podcast' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'The Morning After' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+        return feeds
 
     def preprocess_html(self, soup):
         for attr in 'data-src data-src-mobile'.split():

@@ -33,7 +33,7 @@ def format_tickaroo_liveblog(soup):
             ntag.insert_after(temp)
 
 
-    # process run of images
+# process run of images
 def bilderstrecke(soup,tag):
     flag = False
     try:

@@ -242,7 +242,7 @@ class FazNet(BasicNewsRecipe):
 
         # format liveblog
         if soup.find(attrs={'class':'tik4-live__container'}):
-                format_tickaroo_liveblog(soup)
+            format_tickaroo_liveblog(soup)
 
         # remove sizes and calc attributes in images
         for tag in soup.findAll('img'):

@@ -153,15 +153,15 @@ class RadioCanada(BasicNewsRecipe):
     # https://www.mobileread.com/forums/showpost.php?p=1165462&postcount=6
     # Credit goes to user Starson17
     def parse_feeds (self):
-          feeds = BasicNewsRecipe.parse_feeds(self)
-          for feed in feeds:
-              for article in feed.articles[:]:
-                  if ('VIDEO' in article.title.upper() or
-                      'OHDIO' in article.title.upper() or
-                      '/emissions/' in article.url or
-                      '/segments/' in article.url or
-                      '/entrevue/' in article.url or
-                      '/ohdio/' in article.url
-                  ):
-                      feed.articles.remove(article)
-          return feeds
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        for feed in feeds:
+            for article in feed.articles[:]:
+                if ('VIDEO' in article.title.upper() or
+                    'OHDIO' in article.title.upper() or
+                    '/emissions/' in article.url or
+                    '/segments/' in article.url or
+                    '/entrevue/' in article.url or
+                    '/ohdio/' in article.url
+                ):
+                    feed.articles.remove(article)
+        return feeds

@@ -80,42 +80,41 @@ class RND(BasicNewsRecipe):
     ]
 
     def parse_feeds(self):
-          # Call parent's method.
-          feeds = BasicNewsRecipe.parse_feeds(self)
-          # Loop through all feeds.
-          for feed in feeds:
-              # Loop through all articles in feed.
-              for article in feed.articles[:]:
-                  # Remove articles with '...' in the url.
-                  if '/anzeige/' in article.url:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  # Remove articles with '...' in the title.
-                  elif 'Liveticker' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'Liveblog' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'Newsblog' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'Podcast' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-
-          return feeds
+        # Call parent's method.
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        # Loop through all feeds.
+        for feed in feeds:
+            # Loop through all articles in feed.
+            for article in feed.articles[:]:
+                # Remove articles with '...' in the url.
+                if '/anzeige/' in article.url:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                # Remove articles with '...' in the title.
+                elif 'Liveticker' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Liveblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Newsblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Podcast' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+        return feeds
 
     def preprocess_raw_html(self, raw, url):
-          # remove articles requiring login and advertisements
-          unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
-          if unwantedtag in raw:
-              print('Skipping unwanted article with tag:',unwantedtag)
-              self.abort_article('Skipping unwanted article')
+        # remove articles requiring login and advertisements
+        unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
+        if unwantedtag in raw:
+            print('Skipping unwanted article with tag:',unwantedtag)
+            self.abort_article('Skipping unwanted article')
 
-          unwanted_article_keywords = ['Zum Login']
-          for keyword in unwanted_article_keywords:
-              if keyword in raw:
-                  print('Skipping unwanted article with keyword(s):',keyword)
-                  #self.abort_article('Skipping unwanted article')
-          return raw
+        unwanted_article_keywords = ['Zum Login']
+        for keyword in unwanted_article_keywords:
+            if keyword in raw:
+                print('Skipping unwanted article with keyword(s):',keyword)
+                #self.abort_article('Skipping unwanted article')
+        return raw

@@ -194,42 +194,41 @@ class Saechsische(BasicNewsRecipe):
     ]
 
    def parse_feeds(self):
-          # Call parent's method.
-          feeds = BasicNewsRecipe.parse_feeds(self)
-          # Loop through all feeds.
-          for feed in feeds:
-              # Loop through all articles in feed.
-              for article in feed.articles[:]:
-                  # Remove articles with '...' in the url.
-                  if '/anzeige/' in article.url:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  # Remove articles with '...' in the title.
-                  elif 'Liveticker' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'Liveblog' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'Newsblog' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-                  elif 'Podcast' in article.title:
-                      print('Removing:',article.title)
-                      feed.articles.remove(article)
-
-          return feeds
+        # Call parent's method.
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        # Loop through all feeds.
+        for feed in feeds:
+            # Loop through all articles in feed.
+            for article in feed.articles[:]:
+                # Remove articles with '...' in the url.
+                if '/anzeige/' in article.url:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                # Remove articles with '...' in the title.
+                elif 'Liveticker' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Liveblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Newsblog' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+                elif 'Podcast' in article.title:
+                    print('Removing:',article.title)
+                    feed.articles.remove(article)
+        return feeds
 
     def preprocess_raw_html(self, raw, url):
-          # remove Newsblogs, articles requiring login and advertisements
-          unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
-          if unwantedtag in raw:
-              print('Skipping unwanted article with tag:',unwantedtag)
-              self.abort_article('Skipping unwanted article')
+        # remove Newsblogs, articles requiring login and advertisements
+        unwantedtag='ArticleHeadstyled__ArticleHeadPaidIconContainer'
+        if unwantedtag in raw:
+            print('Skipping unwanted article with tag:',unwantedtag)
+            self.abort_article('Skipping unwanted article')
 
-          unwanted_article_keywords = ['Zum Login']
-          for keyword in unwanted_article_keywords:
-              if keyword in raw:
-                  print('Skipping unwanted article with keyword(s):',keyword)
-                  #self.abort_article('Skipping unwanted article')
-          return raw
+        unwanted_article_keywords = ['Zum Login']
+        for keyword in unwanted_article_keywords:
+            if keyword in raw:
+                print('Skipping unwanted article with keyword(s):',keyword)
+                #self.abort_article('Skipping unwanted article')
+        return raw

@@ -114,7 +114,7 @@ class WSJ(BasicNewsRecipe):
         for figc in soup.findAll('figcaption'):
             figc['id'] = 'big-top-caption'
         if name:= soup.find('h2', attrs={'itemprop':'name'}):
-                name.extract()
+            name.extract()
         for h2 in soup.findAll('h2'):
             if self.tag_to_string(h2).startswith(('What to Read Next', 'Conversation')):
                 h2.extract()

@@ -37,6 +37,7 @@ select = [
     'RUF039', # always use raw-string for regex
     'RUF047', # needless else
     'E302', 'E303', 'E304', 'E305', 'W391', # blank-line standard
+    'E111', 'E112', 'E113', 'E117', # code indentation
 ]
 
 [lint.per-file-ignores]

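For reference, a minimal sketch of what the two newly selected rules that can occur in syntactically valid Python look like in practice. This file is not from the commit and every name in it is invented for illustration; running `ruff check --select E111,E112,E113,E117 demo.py` on it reports the two annotated lines.

    # demo.py: deliberately mis-indented, for illustration only

    def three_space_body(x):
       return x + 1  # E111: indentation is not a multiple of four


    def over_indented_loop(xs):
        total = 0
        for x in xs:
                total += x  # E117: over-indented (twelve spaces where eight are expected)
        return total


    print(three_space_body(1), over_indented_loop([1, 2, 3]))  # prints: 2 6

E112 (expected an indented block) and E113 (unexpectedly indented) flag a missing or surplus indent after a statement, which in non-comment code is normally also a syntax error, so they have no runnable demonstration here.
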
@@ -529,9 +529,9 @@ def serialize_user_metadata(metadata_elem, all_user_metadata, tail='\n'+(' '*8))
     for name, fm in all_user_metadata.items():
         try:
             fm = copy.copy(fm)
-            if (fm.get('datatype', 'text') == 'composite' and
-                    not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
-                fm['#value#'] = ''
+            if (fm.get('datatype', 'text') == 'composite'
+                    and not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
+                fm['#value#'] = ''
             encode_is_multiple(fm)
             fm = object_to_unicode(fm)
             fm = json.dumps(fm, default=to_json, ensure_ascii=False)

@@ -951,9 +951,9 @@ def set_user_metadata(root, prefixes, refines, val):
     nval = {}
     for name, fm in val.items():
         fm = fm.copy()
-        if (fm.get('datatype', 'text') == 'composite' and
-                not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
-            fm['#value#'] = ''
+        if (fm.get('datatype', 'text') == 'composite'
+                and not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
+            fm['#value#'] = ''
         encode_is_multiple(fm)
         nval[name] = fm
     set_user_metadata3(root, prefixes, refines, nval)

@@ -1266,17 +1266,13 @@ class Page:
                 absorb_into = None
             else:
                 absorb_into = prev_region
-                if self.regions[next_region].line_count >= \
-                        self.regions[prev_region].line_count:
+                if self.regions[next_region].line_count >= self.regions[prev_region].line_count:
                     avg_column_count = sum(len(r.columns) for r in regions)/float(len(regions))
-                    if self.regions[next_region].line_count > \
-                            self.regions[prev_region].line_count \
-                            or abs(avg_column_count -
-                                   len(self.regions[prev_region].columns)) \
-                            > abs(avg_column_count -
-                                  len(self.regions[next_region].columns)):
-                        absorb_into = next_region
-                        absorb_at = 'top'
+                    if self.regions[next_region].line_count > self.regions[prev_region].line_count \
+                            or abs(avg_column_count - len(self.regions[prev_region].columns)) \
+                            > abs(avg_column_count - len(self.regions[next_region].columns)):
+                        absorb_into = next_region
+                        absorb_at = 'top'
             if absorb_into is not None:
                 self.regions[absorb_into].absorb_regions(regions, absorb_at)
                 absorbed.update(regions)

@@ -1954,7 +1950,7 @@ class PDFDocument:
             # Do not merge if the next paragraph is indented
             if page.texts:
                 if candidate:
-                        last_line = candidate.texts[-1]
+                    last_line = candidate.texts[-1]
                 if candidate \
                         and last_line.bottom > orphan_space \
                         and page.texts[0].indented == 0:

@@ -33,11 +33,11 @@ class GeneralInformationDialog(Dialog):
         l.addWidget(self.bb)
         html = ''
         if self.include_general_doc:
-                html += '<h2>General Information</h2>'
-                html += FFMLProcessor().document_to_html(general_doc, 'Template General Information')
+            html += '<h2>General Information</h2>'
+            html += FFMLProcessor().document_to_html(general_doc, 'Template General Information')
         if self.include_ffml_doc:
-                html += '<h2>Format Function Markup Language Documentation</h2>'
-                html += FFMLProcessor().document_to_html(ffml_doc, 'FFML Documentation')
+            html += '<h2>Format Function Markup Language Documentation</h2>'
+            html += FFMLProcessor().document_to_html(ffml_doc, 'FFML Documentation')
         e.setHtml(html)
 
 

@@ -101,7 +101,7 @@ class TagDelegate(QStyledItemDelegate): # {{{
 
     def text_color(self, hover, palette) -> QColor:
         if QApplication.instance().is_dark_theme and hover:
-                return QColor(Qt.GlobalColor.black)
+            return QColor(Qt.GlobalColor.black)
         return palette.color(QPalette.ColorRole.WindowText)
 
     def draw_text(self, style, painter, option, widget, index, item):

@@ -66,12 +66,12 @@ class SpeechdTTSBackend(TTSBackend):
 
     @property
     def available_voices(self) -> dict[str, tuple[Voice, ...]]:
-          if self._voices is None:
+        if self._voices is None:
            try:
                self._voices = self._get_all_voices_for_all_output_modules()
            except Exception as e:
                self._set_error(str(e))
-          return self._voices or {}
+        return self._voices or {}
 
     def stop(self) -> None:
         self._last_mark = self._last_text = ''