Mirror of https://github.com/kovidgoyal/calibre.git
write better list/set/dict comprehensions (extra-edit)
commit 6cda6a2e5d
parent 6c54a656ba
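
The hunks below all apply one rule: a comprehension that only re-packs what the iterable already yields is an identity transform, and the matching builtin constructor (list, set) states that directly. A minimal sketch of the equivalence, with throwaway names not taken from the diff:

    d = {'a': 1, 'b': 2}
    # items() already yields (key, value) tuples, so the comprehension
    # rebuilds exactly what list() would collect:
    assert [(k, v) for k, v in d.items()] == list(d.items())
    # the same holds for sets:
    pairs = [('x', 1), ('y', 2)]
    assert {(a, b) for a, b in pairs} == set(pairs)
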
@@ -198,7 +198,7 @@ details and examples.
  lines += [f'.. _calibredb-{language}-{cmd}:', '']
  lines += [cmd, '~'*20, '']
  usage = parser.usage.strip()
- usage = list(usage.replace('%prog', 'calibredb').splitlines())
+ usage = usage.replace('%prog', 'calibredb').splitlines()
  cmdline = ' '+usage[0]
  usage = usage[1:]
  usage = [re.sub(rf'({cmd})([^a-zA-Z0-9])', r':command:`\1`\2', i) for i in usage]
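
Worth noting for this hunk: str.splitlines() already returns a list (not an iterator), so the removed list() wrapper was a pure no-op. A quick standalone check, using nothing beyond the stdlib:

    usage = '%prog command\n  options'.replace('%prog', 'calibredb')
    lines = usage.splitlines()
    assert isinstance(lines, list)           # splitlines() returns a list
    assert lines == list(usage.splitlines())  # wrapping changes nothing
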
@@ -262,7 +262,7 @@ class Economist(BasicNewsRecipe):
  url = 'file:///' + pt.name
  feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
  self.log('\t', title, '\n\t\t', desc)
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())

  def populate_article_metadata(self, article, soup, first):
  article.url = soup.find('h1')['title']
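
This return shape recurs in most of the recipe hunks that follow: parse_index() returns a list of (section, list-of-article-dicts) tuples, which is exactly what a dict's items() view yields. A sketch with invented feed data:

    feeds_dict = {
        'World': [{'title': 'A', 'url': 'http://example.com/a', 'description': ''}],
        'Science': [{'title': 'B', 'url': 'http://example.com/b', 'description': ''}],
    }
    # the comprehension rebuilt the tuples that items() already provides
    assert [(s, a) for s, a in feeds_dict.items()] == list(feeds_dict.items())
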
@@ -76,7 +76,7 @@ class andhra(BasicNewsRecipe):
  if snaps['ObjectType'] == 4:
  continue
  feeds_dict[section].append({'title': '', 'url': url})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())

  def preprocess_raw_html(self, raw, *a):
  data = json.loads(raw)
@@ -76,7 +76,7 @@ class andhra(BasicNewsRecipe):
  if snaps['ObjectType'] == 4:
  continue
  feeds_dict[section].append({'title': '', 'url': url})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())

  def preprocess_raw_html(self, raw, *a):
  data = json.loads(raw)
@@ -127,7 +127,7 @@ class barrons(BasicNewsRecipe):
  desc += ' | ' + self.tag_to_string(summ)
  self.log('\t', title, ' ', url, '\n\t', desc)
  ans[section].append({'title': title, 'url': url, 'description': desc})
- return [(section, articles) for section, articles in ans.items()]
+ return list(ans.items())

  def print_version(self, url):
  return url.split('?')[0].replace('/articles/', '/amp/articles/')
@@ -95,7 +95,7 @@ class BostonGlobePrint(BasicNewsRecipe):

  self.log(section, '\n\t', title, '\n\t', desc, '\n\t\t', url)
  feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())

  def preprocess_raw_html(self, raw_html, url):
  soup = self.index_to_soup(raw_html)
@@ -77,7 +77,7 @@ class Chronicle(BasicNewsRecipe):
  if section_title not in feeds:
  feeds[section_title] = []
  feeds[section_title] += articles
- ans = [(key, val) for key, val in feeds.items()]
+ ans = list(feeds.items())
  return ans

  def preprocess_html(self, soup):
@@ -382,7 +382,7 @@ class Economist(BasicNewsRecipe):
  url = 'file:///' + pt.name
  feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
  self.log('\t', title, '\n\t\t', desc)
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())

  def populate_article_metadata(self, article, soup, first):
  if not self.from_archive:
@@ -382,7 +382,7 @@ class Economist(BasicNewsRecipe):
  url = 'file:///' + pt.name
  feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
  self.log('\t', title, '\n\t\t', desc)
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())

  def populate_article_metadata(self, article, soup, first):
  if not self.from_archive:
@@ -72,5 +72,5 @@ class epw(BasicNewsRecipe):
  if section_title not in feeds:
  feeds[section_title] = []
  feeds[section_title] += articles
- ans = [(key, val) for key, val in feeds.items()]
+ ans = list(feeds.items())
  return ans
@@ -276,7 +276,7 @@ class Fokus(BasicNewsRecipe):
  section_to_articles = self.assign_articles_to_sections(sections, articles)

  # Convert to the expected `list[tuple[str, dict[str, str, str, str]]]` format.
- feeds = [(section_url, article_dicts) for section_url, article_dicts in section_to_articles.items()]
+ feeds = list(section_to_articles.items())
  num_articles = sum(len(article_dicts) for article_dicts in section_to_articles.values())
  self.log(f'A total of {num_articles} articles belonging to {len(section_to_articles)} sections were kept.')

@@ -86,7 +86,7 @@ class ForeignPolicy(BasicNewsRecipe):
  desc += ' | ' + self.tag_to_string(dek)
  self.log('\t', title, url, '\n\t', desc)
  feeds_dict[current_section].append({'title': title, 'url': url, 'description': desc})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())

  def preprocess_html(self, soup):
  for img in soup.findAll('img', attrs={'src':True}):
@@ -96,4 +96,4 @@ class Frontline(BasicNewsRecipe):
  continue
  self.log(section, '\n\t', title, '\n\t', desc, '\n\t\t', url)
  feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())
@@ -134,6 +134,6 @@ class TheHindu(BasicNewsRecipe):
  desc = 'Page no.' + item['pageno'] + ' | ' + item['teaser_text'] or ''
  self.log(' ', title, '\n\t', url)
  feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())
  else:
  return []
@@ -98,7 +98,7 @@ class ht(BasicNewsRecipe):
  desc = page_no
  self.log('\t', title, ' ', desc)
  feeds_dict[section].append({'title': title, 'description': desc, 'url': url})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())


  def preprocess_raw_html(self, raw, *a):
@@ -85,7 +85,7 @@ class HistoryToday(BasicNewsRecipe):
  feeds[section_title] = []
  feeds[section_title] += articles

- ans = [(key, val) for key, val in feeds.items()]
+ ans = list(feeds.items())
  return ans

  def cleanup(self):
@@ -151,7 +151,7 @@ class MitTechnologyReview(BasicNewsRecipe):
  if section_title not in feeds:
  feeds[section_title] = []
  feeds[section_title] += articles
- ans = [(key, val) for key, val in feeds.items()]
+ ans = list(feeds.items())
  return ans

  def preprocess_html(self, soup):
@@ -82,7 +82,7 @@ class PhilosophyNow(BasicNewsRecipe):
  if section_title not in feeds:
  feeds[section_title] = []
  feeds[section_title] += articles
- ans = [(key, val) for key, val in feeds.items()]
+ ans = list(feeds.items())
  return ans

  # PN changes the content it delivers based on cookies, so the
@@ -85,7 +85,7 @@ class Sportstar(BasicNewsRecipe):
  if section_title not in feeds:
  feeds[section_title] = []
  feeds[section_title] += articles
- ans = [(key, val) for key, val in feeds.items()]
+ ans = list(feeds.items())
  return ans

  def preprocess_html(self, soup):
@@ -65,4 +65,4 @@ class StrangeHorizons(BasicNewsRecipe):

  self.log(sec, '\n\t', title, '\n\t', desc, '\n\t\t', url)
  feeds_dict[sec].append({'title': title, 'url': url, 'description': desc})
- return [(section, articles) for section, articles in feeds_dict.items()]
+ return list(feeds_dict.items())
@@ -287,7 +287,7 @@ class LibraryDatabase:
  return [[k, v] for k, v in iteritems(self.new_api.get_usage_count_by_id(field))]

  def field_id_map(self, field):
- return [(k, v) for k, v in iteritems(self.new_api.get_id_map(field))]
+ return list(iteritems(self.new_api.get_id_map(field)))

  def get_custom_items_with_ids(self, label=None, num=None):
  try:
@@ -1395,8 +1395,7 @@ class Amazon(Source):
  q['field-keywords'] += ' ' + q.pop(f, '')
  q['field-keywords'] = q['field-keywords'].strip()

- encoded_q = {x.encode('utf-8', 'ignore'): y.encode(
-     'utf-8', 'ignore') for x, y in q.items()}
+ encoded_q = {x.encode('utf-8', 'ignore'): y.encode('utf-8', 'ignore') for x, y in q.items()}
  url_query = urlencode(encoded_q)
  # amazon's servers want IRIs with unicode characters not percent esaped
  parts = []
@@ -106,7 +106,7 @@ class EbookIterator(BookmarksMixin):

  def search(self, text, index, backwards=False):
  from calibre.ebooks.oeb.polish.parsing import parse
- pmap = [(i, path) for i, path in enumerate(self.spine)]
+ pmap = list(enumerate(self.spine))
  if backwards:
  pmap.reverse()
  q = text.lower()
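
Unlike the items() cases above, the list() call kept here is load-bearing: enumerate() returns a lazy iterator, and the pmap.reverse() two lines later needs a real list. Sketch with a made-up spine:

    spine = ['ch1.html', 'ch2.html', 'ch3.html']  # hypothetical spine entries
    pmap = list(enumerate(spine))  # materialize the iterator
    pmap.reverse()                 # in-place reverse requires a list
    assert pmap == [(2, 'ch3.html'), (1, 'ch2.html'), (0, 'ch1.html')]
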
@@ -644,8 +644,7 @@ class Region:
  def collect_stats(self):
  for column in self.columns:
  column.collect_stats()
- self.average_line_separation = sum([x.average_line_separation for x in
-     self.columns])/float(len(self.columns))
+ self.average_line_separation = sum(x.average_line_separation for x in self.columns)/float(len(self.columns))

  def __iter__(self):
  yield from self.columns
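
Here the brackets inside sum() are dropped rather than the whole comprehension: sum() consumes a generator expression one value at a time, so no intermediate list is built. Illustrative numbers, not from the code:

    separations = [1.5, 2.0, 2.5]  # stand-ins for column.average_line_separation
    avg = sum(x for x in separations) / float(len(separations))
    assert avg == sum([x for x in separations]) / float(len(separations)) == 2.0
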
@@ -1263,8 +1262,7 @@ class Page:
  absorb_into = prev_region
  if self.regions[next_region].line_count >= \
      self.regions[prev_region].line_count:
- avg_column_count = sum([len(r.columns) for r in
-     regions])/float(len(regions))
+ avg_column_count = sum(len(r.columns) for r in regions)/float(len(regions))
  if self.regions[next_region].line_count > \
      self.regions[prev_region].line_count \
      or abs(avg_column_count -
@@ -887,7 +887,7 @@ def get_field_list(db, use_defaults=False, pref_data_override=None):
  for k in fields:
  if k not in result:
  result[k] = True
- return [(k,v) for k,v in result.items()]
+ return list(result.items())


  def get_custom_columns_to_display_in_editor(db):
@@ -636,7 +636,7 @@ class TagsModel(QAbstractItemModel): # {{{
  key, (category_icon_map['user:'] if key.startswith('@') else category_icon_map['custom:'])))

  if key.startswith('@'):
- path_parts = list(key.split('.'))
+ path_parts = key.split('.')
  path = ''
  last_category_node = self.root_item
  tree_root = self.user_category_node_tree
@@ -105,7 +105,7 @@ class TagBrowserMixin: # {{{
  proxy_md = db.new_api.get_proxy_metadata(db.id(idx.row()))
  items = proxy_md.get(current_cat)
  if isinstance(items, str):
- items = [items,]
+ items = [items]
  if items:
  items_title = _('{category} for current book').format(category=cdn)
  if len(items) > 4:
@@ -201,7 +201,7 @@ class EPUB_MOBI(CatalogPlugin):
  if opts.preset not in available_presets:
  if available_presets:
  print(_('Error: Preset "{}" not found.').format(opts.preset))
- print(_('Stored presets: {}').format(', '.join(list(sorted(available_presets.keys())))))
+ print(_('Stored presets: {}').format(', '.join(sorted(available_presets.keys()))))
  else:
  print(_('Error: No stored presets.'))
  return 1
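
sorted() always returns a new list whatever iterable it is given, so list(sorted(...)) duplicated work; str.join() would in fact accept the bare result either way. Standalone check:

    available_presets = {'thumbnails': 1, 'dailyreads': 2}  # invented preset names
    names = sorted(available_presets.keys())
    assert isinstance(names, list)  # sorted() already produced a list
    assert ', '.join(names) == 'dailyreads, thumbnails'
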
@@ -37,11 +37,10 @@ class UserDictionary:
  def __init__(self, **kwargs):
  self.name = kwargs['name']
  self.is_active = kwargs['is_active']
- self.words = {(w, langcode) for w, langcode in kwargs['words']}
+ self.words = set(kwargs['words'])

  def serialize(self):
- return {'name':self.name, 'is_active': self.is_active, 'words':[
-     (w, l) for w, l in self.words]}
+ return {'name':self.name, 'is_active': self.is_active, 'words': list(self.words)}


  _builtins = _custom = None
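
One subtlety in this hunk: the old set comprehension unpacked every (word, langcode) pair and rebuilt it as a tuple, so set(kwargs['words']) is only equivalent when the stored pairs are already hashable tuples; if they arrived as two-element lists (say, deserialized from JSON), the comprehension silently converted them, whereas plain set() would raise. Sketch of both behaviours:

    words = [('colour', 'en-GB'), ('color', 'en-US')]
    assert {(w, l) for w, l in words} == set(words)  # equivalent for tuples
    lists = [['colour', 'en-GB']]
    assert {(w, l) for w, l in lists} == {('colour', 'en-GB')}  # comprehension coerces
    # set(lists) would raise TypeError: unhashable type: 'list'
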
@@ -2603,7 +2603,7 @@ class BibTeX:
  '''
  Format authors for Bibtex compliance (get a list as input)
  '''
- return self.utf8ToBibtex(' and '.join(list(item)))
+ return self.utf8ToBibtex(' and '.join(item))

  def stripUnmatchedSyntax(self, text, open_character, close_character):
  '''
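
As with sorted() above, str.join() accepts any iterable of strings, so converting item to a list first added nothing. Quick check with invented names:

    authors = ('Knuth, Donald', 'Dijkstra, Edsger')  # any iterable of strings works
    assert ' and '.join(authors) == ' and '.join(list(authors))
    assert ' and '.join(authors) == 'Knuth, Donald and Dijkstra, Edsger'
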
@@ -254,7 +254,7 @@ def main(args):
  with open(iff, 'rb') as f:
  orig = f.read()

- chars = list(chars.split(','))
+ chars = chars.split(',')
  individual, ranges = set(), set()

  def not_single(c):