useless tuple (extra-edit)

un-pogaz 2025-01-24 11:14:24 +01:00
parent ed2930712d
commit 59cd38256a
81 changed files with 194 additions and 199 deletions
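
The pattern cleaned up throughout this commit is purely stylistic: in Python it is the comma, not the surrounding parentheses, that builds a tuple, so return (a, b) and return a, b are equivalent, as are for (k, v) in items: and for k, v in items:. A minimal before/after sketch of the idiom, using hypothetical names rather than code taken from calibre:

    # Before: the parentheses around the tuple are redundant.
    def min_max(values):
        return (min(values), max(values))

    for (index, value) in enumerate([3, 1, 2]):
        print(index, value)

    # After: the style applied in this commit; behaviour is identical.
    def min_max(values):
        return min(values), max(values)

    for index, value in enumerate([3, 1, 2]):
        print(index, value)

Tuple unpacking on the left-hand side of an assignment, such as lo, hi = min_max([3, 1, 2]), works the same way with or without parentheses.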

View File

@@ -78,7 +78,7 @@ class AM730(BasicNewsRecipe):
                 break
         if self.debug:
             print(articles)
-        return (sectionName,articles)
+        return sectionName, articles
     def parse_index(self):
         # hard code sections
@@ -91,7 +91,7 @@ class AM730(BasicNewsRecipe):
             ('旅遊.飲食','https://www.am730.com.hk/news/%E6%97%85%E9%81%8A.%E9%A3%B2%E9%A3%9F')
         ] # articles =[]
         SectionsArticles=[]
-        for (title, url) in Sections:
+        for title, url in Sections:
             if self.debug:
                 print(title)
                 print(url)

View File

@@ -309,7 +309,7 @@ class CanWestPaper(BasicNewsRecipe):
         for wdiv in mainsoup.findAll(attrs={'class': ['headline', 'featurecontent']}):
             handle_article(wdiv, key)
-        for (k, url) in self.postmedia_index_pages:
+        for k,url in self.postmedia_index_pages:
             parse_web_index(k, url)
         ans = [(key, articles[key]) for key in ans if key in articles]
         return ans

View File

@@ -309,7 +309,7 @@ class CanWestPaper(BasicNewsRecipe):
         for wdiv in mainsoup.findAll(attrs={'class': ['headline', 'featurecontent']}):
             handle_article(wdiv, key)
-        for (k, url) in self.postmedia_index_pages:
+        for k,url in self.postmedia_index_pages:
             parse_web_index(k, url)
         ans = [(key, articles[key]) for key in ans if key in articles]
         return ans

View File

@@ -51,7 +51,7 @@ class AdvancedUserRecipe1639926896(BasicNewsRecipe):
             lines = description.split('\n')
             return '\n'.join([line.strip() for line in lines if len(line.strip()) > 0])
-        for (section_name, section_url_name) in self.sections:
+        for section_name, section_url_name in self.sections:
             soup = self.index_to_soup(
                 f'https://www.equestriadaily.com/search/label/{section_url_name}?max-results={self.max_articles_per_feed}')
             articles = soup.select('div.post.hentry')

View File

@@ -47,7 +47,7 @@ class GrantLand(BasicNewsRecipe):
         for category in self.CATEGORIES:
-            (cat_name, tag, max_articles) = category
+            cat_name, tag, max_articles = category
             self.log('Reading category:', cat_name)
             articles = []

View File

@@ -85,7 +85,7 @@ class IlPost(BasicNewsRecipe):
                 'title': title,
                 'description': desc
             })
-        return (name, entries)
+        return name, entries
     def parse_index(self):
         feeds = []

View File

@@ -309,7 +309,7 @@ class CanWestPaper(BasicNewsRecipe):
         for wdiv in mainsoup.findAll(attrs={'class': ['headline', 'featurecontent']}):
             handle_article(wdiv, key)
-        for (k, url) in self.postmedia_index_pages:
+        for k,url in self.postmedia_index_pages:
             parse_web_index(k, url)
         ans = [(key, articles[key]) for key in ans if key in articles]
         return ans

View File

@@ -309,7 +309,7 @@ class CanWestPaper(BasicNewsRecipe):
         for wdiv in mainsoup.findAll(attrs={'class': ['headline', 'featurecontent']}):
             handle_article(wdiv, key)
-        for (k, url) in self.postmedia_index_pages:
+        for k,url in self.postmedia_index_pages:
             parse_web_index(k, url)
         ans = [(key, articles[key]) for key in ans if key in articles]
         return ans

View File

@@ -53,7 +53,7 @@ class SaturdayPaper(BasicNewsRecipe):
         articles = []
-        for (feed, link) in feeds:
+        for feed, link in feeds:
             soup = self.index_to_soup(link)
             news = []

View File

@@ -100,10 +100,10 @@ class PrivateEyeRecipe(BasicNewsRecipe):
             return head.rsplit('. By ', 1)
         matches = self.title_author_re.findall(head)
         if matches and len(matches[0]) == 3:
-            (title_1, author, title_2) = matches[0]
+            title_1, author, title_2 = matches[0]
             title = ': '.join((title_1, title_2))
-            return (title, author)
-        return (head, None)
+            return title, author
+        return head, None
     # Return the list of articles from blocks in the content of an index/listing page
     def parse_content(self, soup):
@@ -117,7 +117,7 @@ class PrivateEyeRecipe(BasicNewsRecipe):
         for article in content.findAll('div', class_='listing-block'):
             for a in article.findAll('a', href=True):
                 for h in a.findAll('h3'):
-                    (title, author) = self.title_author(h.getText())
+                    title, author = self.title_author(h.getText())
                     content_articles.append(self.article_entry(
                         title=title,
                         url=self.abs_url(a.get('href')),
@@ -155,7 +155,7 @@ class PrivateEyeRecipe(BasicNewsRecipe):
                 self.log('Subpages found:', href, len(content))
                 articles.extend(content)
             else:
-                (title, author) = self.title_author(a.getText())
+                title, author = self.title_author(a.getText())
                 articles.append(self.article_entry(
                     title=title,
                     url=self.abs_url(a.get('href')),
@@ -176,7 +176,7 @@ class PrivateEyeRecipe(BasicNewsRecipe):
         # 4. The about pages
         abouts = []
-        for (title, url) in self.about_pages.items():
+        for title, url in self.about_pages.items():
            abouts.append({
                'title': title,
                'url': url,
@@ -190,7 +190,7 @@ class PrivateEyeRecipe(BasicNewsRecipe):
     def preprocess_html(self, soup):
         for h in soup.findAll('h1'):
-            (title, author) = self.title_author(h.getText())
+            title, author = self.title_author(h.getText())
             self.log('Replacing h3 "', h.getText(), '" with "', title, '"')
             h.string = title

View File

@@ -322,7 +322,7 @@ class CanWestPaper(BasicNewsRecipe):
         for wdiv in mainsoup.findAll(attrs={'class': ['headline', 'featurecontent']}):
             handle_article(wdiv, key)
-        for (k, url) in self.postmedia_index_pages:
+        for k,url in self.postmedia_index_pages:
             parse_web_index(k, url)
         ans = [(key, articles[key]) for key in ans if key in articles]
         return ans

View File

@@ -310,7 +310,7 @@ class CanWestPaper(BasicNewsRecipe):
         for wdiv in mainsoup.findAll(attrs={'class': ['headline', 'featurecontent']}):
             handle_article(wdiv, key)
-        for (k, url) in self.postmedia_index_pages:
+        for k,url in self.postmedia_index_pages:
             parse_web_index(k, url)
         ans = [(key, articles[key]) for key in ans if key in articles]
         return ans

View File

@@ -253,6 +253,6 @@ class TimesColonist(BasicNewsRecipe):
     def parse_index(self):
         ans = []
-        for (url, title) in self.section_list:
+        for url, title in self.section_list:
             ans = self.add_section_index(ans, url, title)
         return ans

View File

@@ -224,7 +224,7 @@ class ZeitEPUBAbo(BasicNewsRecipe):
             # doing regular expression filtering
             for path in walk('.'):
-                (shortname, extension) = os.path.splitext(path)
+                shortname, extension = os.path.splitext(path)
                 if extension.lower() in ('.html', '.htm', '.xhtml'):
                     with open(path, 'r+b') as f:
                         raw = f.read()

View File

@@ -56,16 +56,16 @@ def _get_series_values(val):
     import re
     series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
     if not val:
-        return (val, None)
+        return val, None
     match = series_index_pat.match(val.strip())
     if match is not None:
         idx = match.group(2)
         try:
             idx = float(idx)
-            return (match.group(1).strip(), idx)
+            return match.group(1).strip(), idx
         except:
             pass
-    return (val, None)
+    return val, None
 def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None, convert_to_local_tz=True):

View File

@@ -697,7 +697,7 @@ class DB:
         self.deleted_fields = []
         with self.conn:
             # Delete previously marked custom columns
-            for (num, label) in self.conn.get(
+            for num, label in self.conn.get(
                     'SELECT id,label FROM custom_columns WHERE mark_for_delete=1'):
                 table, lt = self.custom_table_names(num)
                 self.execute('''\
@@ -2455,9 +2455,7 @@ class DB:
         ts = now.isoformat()
         timestamp = (now - EPOCH).total_seconds()
         for annot_id in annot_ids:
-            for (raw_annot_data, annot_type) in self.execute(
-                'SELECT annot_data, annot_type FROM annotations WHERE id=?', (annot_id,)
-            ):
+            for raw_annot_data, annot_type in self.execute('SELECT annot_data, annot_type FROM annotations WHERE id=?', (annot_id,)):
                 try:
                     annot_data = json.loads(raw_annot_data)
                 except Exception:

View File

@@ -721,8 +721,8 @@ class LibraryDatabase:
         if isinstance(ans, tuple):
             ans = list(ans)
         if data['datatype'] != 'series':
-            return (ans, None)
-        return (ans, self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id))
+            return ans, None
+        return ans, self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id)
     def get_next_cc_series_num_for(self, series, label=None, num=None):
         data = self.backend.custom_field_metadata(label, num)

View File

@@ -125,7 +125,7 @@ class SHLock: # {{{
             # If there are waiting shared locks, issue them
             # all and them wake everyone up.
             if self._shared_queue:
-                for (thread, waiter) in self._shared_queue:
+                for thread, waiter in self._shared_queue:
                     self.is_shared += 1
                     self._shared_owners[thread] = 1
                     waiter.notify()
@@ -133,7 +133,7 @@ class SHLock: # {{{
             # Otherwise, if there are waiting exclusive locks,
             # they get first dibbs on the lock.
             elif self._exclusive_queue:
-                (thread, waiter) = self._exclusive_queue.pop(0)
+                thread, waiter = self._exclusive_queue.pop(0)
                 self._exclusive_owner = thread
                 self.is_exclusive += 1
                 waiter.notify()
@@ -149,7 +149,7 @@ class SHLock: # {{{
             # If there are waiting exclusive locks,
             # they get first dibbs on the lock.
             if self._exclusive_queue:
-                (thread, waiter) = self._exclusive_queue.pop(0)
+                thread, waiter = self._exclusive_queue.pop(0)
                 self._exclusive_owner = thread
                 self.is_exclusive += 1
                 waiter.notify()

View File

@@ -280,7 +280,7 @@ class Notes:
         if field_name:
             return {item_id for (item_id,) in conn.execute('SELECT item FROM notes_db.notes WHERE colname=?', (field_name,))}
         ans = defaultdict(set)
-        for (note_id, field_name) in conn.execute('SELECT item, colname FROM notes_db.notes'):
+        for note_id, field_name in conn.execute('SELECT item, colname FROM notes_db.notes'):
             ans[field_name].add(note_id)
         return ans

View File

@@ -162,10 +162,10 @@ class Restore(Thread):
             self.tb = traceback.format_exc()
         if self.failed_dirs:
             for x in self.failed_dirs:
-                for (dirpath, tb) in self.failed_dirs:
+                for dirpath, tb in self.failed_dirs:
                     self.tb += f'\n\n-------------\nFailed to restore: {dirpath}\n{tb}'
         if self.failed_restores:
-            for (book, tb) in self.failed_restores:
+            for book, tb in self.failed_restores:
                 self.tb += f'\n\n-------------\nFailed to restore: {book["path"]}\n{tb}'
     def load_preferences(self):

View File

@@ -201,7 +201,7 @@ class ThumbnailCache:
             try:
                 uuid, book_id = line.partition(' ')[0::2]
                 book_id = int(book_id)
-                return (uuid, book_id)
+                return uuid, book_id
             except Exception:
                 return None
         invalidate = {record(x) for x in raw.splitlines()}

View File

@@ -40,16 +40,16 @@ series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
 def get_series_values(val):
     if not val:
-        return (val, None)
+        return val, None
     match = series_index_pat.match(val.strip())
     if match is not None:
         idx = match.group(2)
         try:
             idx = float(idx)
-            return (match.group(1).strip(), idx)
+            return match.group(1).strip(), idx
         except:
             pass
-    return (val, None)
+    return val, None
 def multiple_text(sep, ui_sep, x):

View File

@@ -92,7 +92,7 @@ class FOLDER_DEVICE(USBMS):
         self.report_progress = report_progress
     def card_prefix(self, end_session=True):
-        return (None, None)
+        return None, None
     def eject(self):
         self.is_connected = False

View File

@@ -2823,7 +2823,7 @@ class KOBOTOUCH(KOBO):
         # Did we actually want to letterbox?
         if not letterbox:
             canvas_size = kobo_size
-        return (kobo_size, canvas_size)
+        return kobo_size, canvas_size
     def _create_cover_data(
             self, cover_data, resize_to, minify_to, kobo_size,

View File

@@ -687,7 +687,7 @@ class XMLCache:
             if 'id' not in record.attrib:
                 num = self.max_id(record.getroottree().getroot())
                 record.set('id', str(num+1))
-        return (gtz_count, ltz_count, use_tz_var)
+        return gtz_count, ltz_count, use_tz_var
     # }}}
     # Writing the XML files {{{

View File

@@ -1261,16 +1261,16 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
     @synchronous('sync_lock')
     def card_prefix(self, end_session=True):
         self._debug()
-        return (None, None)
+        return None, None
     @synchronous('sync_lock')
     def total_space(self, end_session=True):
         self._debug()
         opcode, result = self._call_client('TOTAL_SPACE', {})
         if opcode == 'OK':
-            return (result['total_space_on_device'], 0, 0)
+            return result['total_space_on_device'], 0, 0
         # protocol error if we get here
-        return (0, 0, 0)
+        return 0, 0, 0
     @synchronous('sync_lock')
     def free_space(self, end_session=True):
@@ -1280,7 +1280,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
             self._debug('free space:', result['free_space_on_device'])
             return (result['free_space_on_device'], 0, 0)
         # protocol error if we get here
-        return (0, 0, 0)
+        return 0, 0, 0
     @synchronous('sync_lock')
     def books(self, oncard=None, end_session=True):
@@ -1591,7 +1591,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
     @synchronous('sync_lock')
     def prepare_addable_books(self, paths, this_book=None, total_books=None):
         for idx, path in enumerate(paths):
-            (ign, ext) = os.path.splitext(path)
+            ign, ext = os.path.splitext(path)
             with PersistentTemporaryFile(suffix=ext) as tf:
                 self.get_file(path, tf, this_book=this_book, total_books=total_books)
                 paths[idx] = tf.name
@@ -1630,12 +1630,12 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
     def _check_if_format_send_needed(self, db, id_, book):
         if not self.will_ask_for_update_books:
-            return (None, False)
+            return None, False
         from calibre.utils.date import isoformat, parse_date
         try:
             if not hasattr(book, '_format_mtime_'):
-                return (None, False)
+                return None, False
             ext = posixpath.splitext(book.lpath)[1][1:]
             fmt_metadata = db.new_api.format_metadata(id_, ext)
@@ -1647,17 +1647,17 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
                     self._show_message(_('You have book formats in your library '
                                          'with dates in the future. See calibre '
                                          'for details'))
-                    return (None, True)
+                    return None, True
                 cc_mtime = parse_date(book.get('_format_mtime_'), as_utc=True)
                 self._debug(book.title, 'cal_mtime', calibre_mtime, 'cc_mtime', cc_mtime)
                 if cc_mtime < calibre_mtime:
                     book.set('_format_mtime_', isoformat(self.now))
-                    return (posixpath.basename(book.lpath), False)
+                    return posixpath.basename(book.lpath), False
         except:
             self._debug('exception checking if must send format', book.title)
             traceback.print_exc()
-        return (None, False)
+        return None, False
     @synchronous('sync_lock')
     def synchronize_with_db(self, db, id_, book, first_call):

View File

@@ -299,7 +299,7 @@ class CollectionsBookList(BookList):
             if all_by_title not in collections:
                 collections[all_by_title] = {}
             collections[all_by_title][lpath] = (book, tsval, asval)
-        for (n, sb) in all_by_something:
+        for n,sb in all_by_something:
             if n not in collections:
                 collections[n] = {}
             collections[n][lpath] = (book, book.get(sb, ''), tsval)

View File

@@ -174,7 +174,7 @@ class Device(DeviceConfig, DevicePlugin):
             casz = self._windows_space(self._card_a_prefix)[0]
             cbsz = self._windows_space(self._card_b_prefix)[0]
-        return (msz, casz, cbsz)
+        return msz, casz, cbsz
     def free_space(self, end_session=True):
         msz = casz = cbsz = 0
@@ -193,7 +193,7 @@ class Device(DeviceConfig, DevicePlugin):
             casz = self._windows_space(self._card_a_prefix)[1]
             cbsz = self._windows_space(self._card_b_prefix)[1]
-        return (msz, casz, cbsz)
+        return msz, casz, cbsz
     def windows_filter_pnp_id(self, pnp_id):
         return False

View File

@@ -192,7 +192,7 @@ class Block:
     @position.setter
     def position(self, new_pos):
-        (x, y) = new_pos
+        x, y = new_pos
         self._position = Point(x, y)
         if self.layouts:
             self.layouts[0].setPosition(QPointF(x, y))

View File

@@ -499,7 +499,7 @@ class Styles:
             prefix = ef + '\n' + prefix
         ans = []
-        for (cls, css) in sorted(itervalues(self.classes), key=lambda x:x[0]):
+        for cls, css in sorted(itervalues(self.classes), key=lambda x:x[0]):
             b = (f'\t{k}: {v};' for k, v in iteritems(css))
             b = '\n'.join(b)
             ans.append('.{} {{\n{}\n}}\n'.format(cls, b.rstrip(';')))

View File

@@ -411,14 +411,14 @@ def oeb2html_no_css(oeb_book, log, opts):
     izer = OEB2HTMLNoCSSizer(log)
     html = izer.oeb2html(oeb_book, opts)
     images = izer.images
-    return (html, images)
+    return html, images
 def oeb2html_inline_css(oeb_book, log, opts):
     izer = OEB2HTMLInlineCSSizer(log)
     html = izer.oeb2html(oeb_book, opts)
     images = izer.images
-    return (html, images)
+    return html, images
 def oeb2html_class_css(oeb_book, log, opts):
@@ -426,4 +426,4 @@ def oeb2html_class_css(oeb_book, log, opts):
     setattr(opts, 'class_style', 'inline')
     html = izer.oeb2html(oeb_book, opts)
     images = izer.images
-    return (html, images)
+    return html, images

View File

@@ -864,7 +864,7 @@ class LitFile:
     def get_atoms(self, entry):
         name = '/'.join(('/data', entry.internal, 'atom'))
         if name not in self.entries:
-            return ({}, {})
+            return {}, {}
         data = self.get_file(name)
         nentries, data = u32(data), data[4:]
         tags = {}
@@ -878,7 +878,7 @@ class LitFile:
         if len(tags) != nentries:
             self._warn('damaged or invalid atoms tag table')
         if len(data) < 4:
-            return (tags, {})
+            return tags, {}
         attrs = {}
         nentries, data = u32(data), data[4:]
         for i in range(1, nentries + 1):
@@ -890,7 +890,7 @@ class LitFile:
             attrs[i], data = data[:size], data[size:]
         if len(attrs) != nentries:
             self._warn('damaged or invalid atoms attributes table')
-        return (tags, attrs)
+        return tags, attrs
 class LitContainer:

View File

@@ -300,7 +300,7 @@ class Styles(etree.XSLTExtension):
             with open(name, 'wb') as f:
                 f.write(as_bytes(self.CSS))
-        for (w, sel) in [(self.text_styles, 'ts'), (self.block_styles,
+        for w,sel in [(self.text_styles, 'ts'), (self.block_styles,
                 'bs')]:
             for i, s in enumerate(w):
                 if not s:

View File

@@ -1106,7 +1106,7 @@ class Button(LRFObject):
         for i in self.actions[button_type]:
             if i[0] == 1:
                 return i[1:][0]
-        return (None, None)
+        return None, None
     def __str__(self):
         s = '<Button objid="%s">\n'%(self.id,)

View File

@@ -189,7 +189,7 @@ class Delegator:
         return applied
     def applySettings(self, settings, testValid=False):
-        for (setting, value) in settings.items():
+        for setting, value in settings.items():
             self.applySetting(setting, value, testValid)
             # if setting not in self.delegatedSettingsDict:
             #    raise LrsError("setting %s not valid" % setting)
@@ -232,7 +232,7 @@ class LrsAttributes:
         if alsoAllow is None:
             alsoAllow = []
         self.attrs = defaults.copy()
-        for (name, value) in settings.items():
+        for name, value in settings.items():
             if name not in self.attrs and name not in alsoAllow:
                 raise LrsError('%s does not support setting %s' %
                         (self.__class__.__name__, name))
@@ -1615,7 +1615,7 @@ class Button(LrsObject, LrsContainer):
             raise LrsError('%s has no PushButton or JumpTo subs'%self.__class__.__name__)
     def toLrf(self, lrfWriter):
-        (refobj, refpage) = self.findJumpToRefs()
+        refobj, refpage = self.findJumpToRefs()
         # print('Button writing JumpTo refobj=', jumpto.refobj, ', and refpage=', jumpto.refpage)
         button = LrfObject('Button', self.objId)
         button.appendLrfTag(LrfTag('buttonflags', 0x10)) # pushbutton
@@ -1842,7 +1842,7 @@ class Span(LrsSimpleChar1, LrsContainer):
         oldTextStyle = self.findCurrentTextStyle()
         # set the attributes we want changed
-        for (name, value) in tuple(iteritems(self.attrs)):
+        for name, value in tuple(iteritems(self.attrs)):
             if name in oldTextStyle.attrs and oldTextStyle.attrs[name] == self.attrs[name]:
                 self.attrs.pop(name)
             else:
@@ -1864,7 +1864,7 @@ class Span(LrsSimpleChar1, LrsContainer):
     def toElement(self, se):
         element = Element('Span')
-        for (key, value) in self.attrs.items():
+        for key, value in self.attrs.items():
             element.set(key, str(value))
         appendTextElements(element, self.contents, se)

View File

@@ -668,7 +668,7 @@ class Metadata:
         Returns the tuple (display_name, formatted_value)
         '''
         name, val, ign, ign = self.format_field_extended(key, series_with_index)
-        return (name, val)
+        return name, val
     def format_field_extended(self, key, series_with_index=True):
         from calibre.ebooks.metadata import authors_to_string
@@ -803,7 +803,7 @@ class Metadata:
         for key in self.custom_field_keys():
             val = self.get(key, None)
             if val:
-                (name, val) = self.format_field(key)
+                name, val = self.format_field(key)
                 fmt(name, str(val))
         return '\n'.join(ans)
@@ -832,7 +832,7 @@ class Metadata:
         for key in self.custom_field_keys():
             val = self.get(key, None)
             if val:
-                (name, val) = self.format_field(key)
+                name, val = self.format_field(key)
                 ans += [(name, val)]
         for i, x in enumerate(ans):
             ans[i] = '<tr><td><b>%s</b></td><td>%s</td></tr>'%x

View File

@@ -30,7 +30,7 @@ def get_cover(docx):
         if width < 0 or height < 0:
             continue
         if 0.8 <= height/width <= 1.8 and height*width >= 160000:
-            return (fmt, raw)
+            return fmt, raw
 def get_metadata(stream):

View File

@@ -232,7 +232,7 @@ def get_metadata_(src, encoding=None):
         mi.tags = tags
     # IDENTIFIERS
-    for (k,v) in iteritems(meta_tag_ids):
+    for k,v in iteritems(meta_tag_ids):
         v = [x.strip() for x in v if x.strip()]
         if v:
             mi.set_identifier(k, v[0])

View File

@@ -299,7 +299,7 @@ class MetadataUpdater:
         return pdbrecords
     def update_pdbrecords(self, updated_pdbrecords):
-        for (i, pdbrecord) in enumerate(updated_pdbrecords):
+        for i,pdbrecord in enumerate(updated_pdbrecords):
             self.data[78+i*8:78+i*8 + 4] = pack('>L',pdbrecord)
         # Refresh local copy

View File

@@ -204,7 +204,7 @@ def read_refines(root):
 def refdef(prop, val, scheme=None):
-    return (prop, val, scheme)
+    return prop, val, scheme
 def set_refines(elem, existing_refines, *new_refines):

View File

@@ -31,7 +31,7 @@ class Reader:
             if codelen <= 8:
                 assert term
             maxcode = ((maxcode + 1) << (32 - codelen)) - 1
-            return (codelen, term, maxcode)
+            return codelen, term, maxcode
         self.dict1 = tuple(map(dict1_unpack, struct.unpack_from(b'>256L', huff, off1)))
         dict2 = struct.unpack_from(b'>64L', huff, off2)

View File

@@ -1014,14 +1014,14 @@ class Manifest:
             hrefs = self.oeb.manifest.hrefs
             if path not in hrefs:
                 self.oeb.logger.warn('CSS import of missing file %r' % path)
-                return (None, None)
+                return None, None
             item = hrefs[path]
             if item.media_type not in OEB_STYLES:
                 self.oeb.logger.warn('CSS import of non-CSS file %r' % path)
-                return (None, None)
+                return None, None
             data = item.data.cssText
             enc = None if isinstance(data, str) else 'utf-8'
-            return (enc, data)
+            return enc, data
     # }}}

View File

@@ -193,7 +193,7 @@ def condense_edge(vals):
     if len(edges) != 4 or set(edges) != {'left', 'top', 'right', 'bottom'}:
         return
     ce = {}
-    for (x, y) in [('left', 'right'), ('top', 'bottom')]:
+    for x,y in [('left', 'right'), ('top', 'bottom')]:
         if edges[x] == edges[y]:
             ce[x] = edges[x]
         else:

View File

@@ -90,7 +90,7 @@ def create_profile():
     if ans is None:
         ans = create_profile.ans = QWebEngineProfile(QApplication.instance())
         setup_profile(ans)
-        for (name, code) in stylelint_js():
+        for name, code in stylelint_js():
            s = QWebEngineScript()
            s.setName(name)
            s.setSourceCode(code)

View File

@@ -75,7 +75,7 @@ def pretty_opf(root):
             i = spine_ids.get(x.get('id', None), 1000000000)
         else:
             i = sort_key(href)
-        return (cat, i)
+        return cat, i
     for manifest in root.xpath('//opf:manifest', namespaces=OPF_NAMESPACES):
         try:

View File

@@ -32,7 +32,7 @@ OPF_TEMPLATE = '''
 def create_manifest_item(name, data=b'', properties=None):
-    return (name, data, properties)
+    return name, data, properties
 cmi = create_manifest_item

View File

@@ -396,15 +396,15 @@ class Stylizer:
             hrefs = self.oeb.manifest.hrefs
             if path not in hrefs:
                 self.logger.warn('CSS import of missing file %r' % path)
-                return (None, None)
+                return None, None
             item = hrefs[path]
             if item.media_type not in OEB_STYLES:
                 self.logger.warn('CSS import of non-CSS file %r' % path)
-                return (None, None)
+                return None, None
             data = item.data.cssText
             if not isinstance(data, bytes):
                 data = data.encode('utf-8')
-            return ('utf-8', data)
+            return 'utf-8', data
     def style(self, element):
         try:

View File

@@ -49,7 +49,7 @@ punct_table = {
 def fix_punct(line):
-    for (key, value) in punct_table.items():
+    for key, value in punct_table.items():
         line = line.replace(key, value)
     return line

View File

@@ -41,7 +41,7 @@ class PdbHeaderReader:
         self.stream.seek(78 + number * 8)
         offset, a1, a2, a3, a4 = struct.unpack('>LBBBB', self.stream.read(8))[0]
         flags, val = a1, a2 << 16 | a3 << 8 | a4
-        return (offset, flags, val)
+        return offset, flags, val
     def section_offset(self, number):
         if not (0 <= number < self.num_sections):

View File

@@ -104,7 +104,7 @@ class RBWriter:
             zobj = zlib.compressobj(9, zlib.DEFLATED, 13, 8, 0)
             pages.append(zobj.compress(text[i * TEXT_RECORD_SIZE : (i * TEXT_RECORD_SIZE) + TEXT_RECORD_SIZE]) + zobj.flush())
-        return (size, pages)
+        return size, pages
     def _images(self, manifest):
         from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES

View File

@@ -52,10 +52,10 @@ class CheckBrackets:
                 self.open_brack(line)
             if self.__token_info == 'cb<nu<clos-brack':
                 if not self.close_brack(line):
-                    return (False, "closed bracket doesn't match, line %s" % line_count)
+                    return False, "closed bracket doesn't match, line %s" % line_count
         if self.__bracket_count != 0:
             msg = ("At end of file open and closed brackets don't match\n"
                    'total number of brackets is %s') % self.__bracket_count
-            return (False, msg)
-        return (True, 'Brackets match!')
+            return False, msg
+        return True, 'Brackets match!'

View File

@@ -783,7 +783,7 @@ class Textile:
         True
        '''
-        (scheme, netloc) = urlparse(url)[0:2]
+        scheme, netloc = urlparse(url)[0:2]
         return not scheme and not netloc
     def relURL(self, url):

View File

@@ -444,8 +444,7 @@ class DeviceManager(Thread): # {{{
             kls = None
             while True:
                 try:
-                    (kls,device_kind, folder_path) = \
-                        self.mount_connection_requests.get_nowait()
+                    kls, device_kind, folder_path = self.mount_connection_requests.get_nowait()
                 except queue.Empty:
                     break
             if kls is not None:
@@ -566,7 +565,7 @@ class DeviceManager(Thread): # {{{
             mainlist = self.device.books(oncard=None, end_session=False)
             cardalist = self.device.books(oncard='carda')
             cardblist = self.device.books(oncard='cardb')
-            return (mainlist, cardalist, cardblist)
+            return mainlist, cardalist, cardblist
     def books(self, done, add_as_step_to_job=None):
         '''Return callable that returns the list of books on device as two booklists'''

View File

@@ -432,11 +432,11 @@ class CheckLibraryDialog(QDialog):
                 c = node.child(i).checkState(2)
                 checked = checked or c == Qt.CheckState.Checked
                 all_checked = all_checked and c == Qt.CheckState.Checked
-            return (checked, all_checked)
+            return checked, all_checked
         def any_child_delete_checked():
             for parent in self.top_level_items.values():
-                (c, _) = is_child_delete_checked(parent)
+                c, _ = is_child_delete_checked(parent)
                 if c:
                     return True
             return False
@@ -464,7 +464,7 @@ class CheckLibraryDialog(QDialog):
         else:
             for parent in self.top_level_items.values():
                 if parent.data(2, Qt.ItemDataRole.UserRole) == self.is_deletable:
-                    (child_chkd, all_chkd) = is_child_delete_checked(parent)
+                    child_chkd, all_chkd = is_child_delete_checked(parent)
                     if all_chkd and child_chkd:
                         check_state = Qt.CheckState.Checked
                     elif child_chkd:

View File

@@ -86,8 +86,8 @@ class DeleteMatchingFromDeviceDialog(QDialog, Ui_DeleteMatchingFromDeviceDialog)
         self.table.setRowCount(rows)
         row = 0
         for card in items:
-            (model,books) = items[card]
-            for (id,book) in books:
+            model, books = items[card]
+            for id,book in books:
                 item = QTableWidgetItem()
                 item.setFlags(Qt.ItemFlag.ItemIsUserCheckable|Qt.ItemFlag.ItemIsEnabled)
                 item.setCheckState(Qt.CheckState.Checked)
@@ -115,7 +115,7 @@ class DeleteMatchingFromDeviceDialog(QDialog, Ui_DeleteMatchingFromDeviceDialog)
         for row in range(self.table.rowCount()):
             if self.table.item(row, 0).checkState() == Qt.CheckState.Unchecked:
                 continue
-            (model, id, path) = self.table.item(row, 0).data(Qt.ItemDataRole.UserRole)
+            model, id, path = self.table.item(row, 0).data(Qt.ItemDataRole.UserRole)
             path = str(path)
             self.result.append((model, id, path))
         return

View File

@@ -679,40 +679,40 @@ class Quickview(QDialog, Ui_Quickview):
         mi = self.db.new_api.get_proxy_metadata(book_id)
         try:
             if col == 'title':
-                return (mi.title, mi.title_sort, 0)
+                return mi.title, mi.title_sort, 0
             elif col == 'authors':
-                return (' & '.join(mi.authors), mi.author_sort, 0)
+                return ' & '.join(mi.authors), mi.author_sort, 0
             elif col == 'series':
                 series = mi.format_field('series')[1]
                 if series is None:
-                    return ('', None, 0)
+                    return '', None, 0
                 else:
-                    return (series, mi.series, mi.series_index)
+                    return series, mi.series, mi.series_index
             elif col == 'size':
                 v = mi.get('book_size')
                 if v is not None:
-                    return (f'{v:n}', v, 0)
+                    return f'{v:n}', v, 0
                 else:
-                    return ('', None, 0)
+                    return '', None, 0
             elif self.fm[col]['datatype'] == 'series':
                 v = mi.format_field(col)[1]
-                return (v, mi.get(col), mi.get(col+'_index'))
+                return v, mi.get(col), mi.get(col+'_index')
             elif self.fm[col]['datatype'] == 'datetime':
                 v = mi.format_field(col)[1]
                 d = mi.get(col)
                 if d is None:
                     d = UNDEFINED_DATE
-                return (v, timestampfromdt(d), 0)
+                return v, timestampfromdt(d), 0
             elif self.fm[col]['datatype'] in ('float', 'int'):
                 v = mi.format_field(col)[1]
                 sort_val = mi.get(col)
-                return (v, sort_val, 0)
+                return v, sort_val, 0
             else:
                 v = mi.format_field(col)[1]
-                return (v, v, 0)
+                return v, v, 0
         except:
             traceback.print_exc()
-            return (_('Something went wrong while filling in the table'), '', 0)
+            return _('Something went wrong while filling in the table'), '', 0
     # Deal with sizing the table columns. Done here because the numbers are not
     # correct until the first paint.

View File

@@ -170,7 +170,7 @@ def render_note_line(line):
         yield prepare_string_for_xml(line)
         return
     pos = 0
-    for (s, e) in urls:
+    for s,e in urls:
         if s > pos:
             yield prepare_string_for_xml(line[pos:s])
         yield '<a href="{0}">{0}</a>'.format(prepare_string_for_xml(line[s:e], True))

View File

@@ -1829,7 +1829,7 @@ class IdentifiersEdit(QLineEdit, ToMetadataMixin, LineEditIndicators):
                     return True
         except Exception:
             pass
-        for (key, prefix) in (
+        for key, prefix in (
                 ('doi', 'https://dx.doi.org/'),
                 ('doi', 'https://doi.org/'),
                 ('arxiv', 'https://arxiv.org/abs/'),

View File

@@ -233,11 +233,10 @@ class ConditionEditor(QWidget): # {{{
     @property
     def condition(self):
-        c, a, v = (self.current_col, self.current_action,
-                self.current_val)
+        c, a, v = (self.current_col, self.current_action, self.current_val)
         if not c or not a:
             return None
-        return (c, a, v)
+        return c, a, v
     @condition.setter
     def condition(self, condition):
@@ -864,7 +863,7 @@ class RulesModel(QAbstractListModel): # {{{
                 col = self.fm[col]['name']
             return self.rule_to_html(kind, col, rule)
         if role == Qt.ItemDataRole.UserRole:
-            return (kind, col, rule)
+            return kind, col, rule
     def add_rule(self, kind, col, rule, selected_row=None):
         self.beginResetModel()

View File

@@ -712,7 +712,7 @@ class TagsView(QTreeView): # {{{
                 self._model.set_value_icon(key, TEMPLATE_ICON_INDICATOR, d.rule[2], False)
                 self.recount()
             return
-        (icon_file_name, for_children) = extra if extra is not None else (None, None)
+        icon_file_name, for_children = extra if extra is not None else (None, None)
         item_val, desired_file_name = make_icon_name(key, index)
         if icon_file_name is None:
             # User wants to specify a specific icon
@@ -1283,7 +1283,7 @@ class TagsView(QTreeView): # {{{
         if key not in ('search', 'formats') and not key.startswith('@'):
             def get_rule_data(tag, key):
                 if tag is None:
-                    return (None, None, None)
+                    return None, None, None
                 name = tag.original_name
                 cat_rules = self._model.value_icons.get(key, {})
                 icon_name, for_child = cat_rules.get(name, (None, None))

View File

@@ -783,7 +783,7 @@ class FileList(QTreeWidget, OpenWithHandler):
             for i in range(parent.childCount()):
                 item = parent.child(i)
                 if str(item.data(0, NAME_ROLE) or '') == name:
-                    return (category, i)
+                    return category, i
         return (None, -1)
     def merge_files(self):

View File

@@ -284,7 +284,7 @@ def searchable_text_for_name(name):
             if children:
                 for child in reversed(children):
                     a((child, ignore_text_in_node_and_children, in_ruby))
-    for (tail, body) in removed_tails:
+    for tail, body in removed_tails:
         if tail is not None:
             body['l'] = tail
     return ''.join(ans), anchor_offset_map
@@ -419,7 +419,7 @@ def search_in_name(name, search_query, ctx_size=75):
         return spans.append((s, s + l))
     primary_collator_without_punctuation().find_all(search_query.text, raw, a, search_query.mode == 'word')
-    for (start, end) in miter():
+    for start, end in miter():
         before = raw[max(0, start-ctx_size):start]
         after = raw[end:end+ctx_size]
         yield before, raw[start:end], after, start

View File

@@ -56,7 +56,7 @@ class MetadataBackup(Thread): # {{{
         while self.keep_running:
             try:
                 time.sleep(2) # Limit to one book per two seconds
-                (id_, sequence) = self.db.get_a_dirtied_book()
+                id_, sequence = self.db.get_a_dirtied_book()
                 if id_ is None:
                     continue
                 # print('writer thread', id_, sequence)
@@ -381,10 +381,10 @@ class ResultCache(SearchQueryParser): # {{{
         relop = None
         for k in self.date_search_relops.keys():
             if query.startswith(k):
-                (p, relop) = self.date_search_relops[k]
+                p, relop = self.date_search_relops[k]
                 query = query[p:]
         if relop is None:
-            (p, relop) = self.date_search_relops['=']
+            p, relop = self.date_search_relops['=']
         if query in self.local_today:
             qd = now()
@@ -464,10 +464,10 @@ class ResultCache(SearchQueryParser): # {{{
         relop = None
         for k in self.numeric_search_relops.keys():
             if query.startswith(k):
-                (p, relop) = self.numeric_search_relops[k]
+                p, relop = self.numeric_search_relops[k]
                 query = query[p:]
         if relop is None:
-            (p, relop) = self.numeric_search_relops['=']
+            p, relop = self.numeric_search_relops['=']
         if dt == 'int':
             def cast(x):
@@ -541,7 +541,7 @@ class ResultCache(SearchQueryParser): # {{{
             if len(q) != 2:
                 raise ParseException(
                     _('Invalid query format for colon-separated search: {0}').format(query))
-            (keyq, valq) = q
+            keyq, valq = q
             keyq_mkind, keyq = self._matchkind(keyq)
             valq_mkind, valq = self._matchkind(valq)
         else:

View File

@@ -551,7 +551,7 @@ class CatalogBuilder:
         authors = [(record['author'], record['author_sort']) for record in books_by_author]
         current_author = authors[0]
-        for (i, author) in enumerate(authors):
+        for i, author in enumerate(authors):
             if author != current_author and i:
                 if author[0] == current_author[0]:
                     if self.opts.fmt == 'mobi':
@@ -808,7 +808,7 @@ class CatalogBuilder:
         multiple_authors = False
         unique_authors = []
         individual_authors = set()
-        for (i, author) in enumerate(authors):
+        for i, author in enumerate(authors):
             if author != current_author:
                 # Note that current_author and author are tuples: (friendly, sort)
                 multiple_authors = True
@@ -1201,7 +1201,7 @@ class CatalogBuilder:
         def _format_tag_list(tags, indent=1, line_break=70, header='Tag list'):
             def _next_tag(sorted_tags):
-                for (i, tag) in enumerate(sorted_tags):
+                for i, tag in enumerate(sorted_tags):
                     if i < len(tags) - 1:
                         yield tag + ', '
                     else:
@@ -1845,7 +1845,7 @@ class CatalogBuilder:
         date_range_list = []
        today_time = nowf().replace(hour=23, minute=59, second=59)
-        for (i, date) in enumerate(self.DATE_RANGE):
+        for i, date in enumerate(self.DATE_RANGE):
             date_range_limit = self.DATE_RANGE[i]
             if i:
                 date_range = '%d to %d days ago' % (self.DATE_RANGE[i - 1], self.DATE_RANGE[i])
@@ -2137,7 +2137,7 @@ class CatalogBuilder:
         # genre_list = [ {friendly_tag:[{book},{book}]}, {friendly_tag:[{book},{book}]}, ...]
         master_genre_list = []
         for genre_tag_set in genre_list:
-            for (index, genre) in enumerate(genre_tag_set):
+            for index, genre in enumerate(genre_tag_set):
                 # print('genre: %s \t genre_tag_set[genre]: %s' % (genre, genre_tag_set[genre]))
                 # Create sorted_authors[0] = friendly, [1] = author_sort for NCX creation
@@ -2150,7 +2150,7 @@ class CatalogBuilder:
                 books_by_current_author = 1
                 current_author = authors[0]
                 unique_authors = []
-                for (i, author) in enumerate(authors):
+                for i, author in enumerate(authors):
                     if author != current_author and i:
                         unique_authors.append((current_author[0], current_author[1], books_by_current_author))
                         current_author = author
@@ -2698,7 +2698,7 @@ class CatalogBuilder:
             _soup = BeautifulSoup('')
             genresTag = _soup.new_tag('p')
             gtc = 0
-            for (i, tag) in enumerate(sorted(book.get('genres', []))):
+            for i, tag in enumerate(sorted(book.get('genres', []))):
                 aTag = _soup.new_tag('a')
                 if self.opts.generate_genres:
                     try:
@@ -2835,7 +2835,7 @@ class CatalogBuilder:
         self.update_progress_full_step(_('Descriptions HTML'))
-        for (title_num, title) in enumerate(self.books_by_title):
+        for title_num, title in enumerate(self.books_by_title):
             self.update_progress_micro_step('%s %d of %d' %
                                             (_('Description HTML'),
                                              title_num, len(self.books_by_title)),
@@ -3167,7 +3167,7 @@ class CatalogBuilder:
             _add_to_series_by_letter(current_series_list)
         # Add *article* entries for each populated series title letter
-        for (i, books) in enumerate(series_by_letter):
+        for i, books in enumerate(series_by_letter):
             sec_id = '%sSeries-ID' % (title_letters[i].upper())
             if len(title_letters[i]) > 1:
                 fmt_string = _('Series beginning with %s')
@@ -3251,7 +3251,7 @@ class CatalogBuilder:
             _add_to_books_by_letter(current_book_list)
         # Add *article* entries for each populated title letter
-        for (i, books) in enumerate(books_by_letter):
+        for i, books in enumerate(books_by_letter):
             sec_id = '%sTitles-ID' % (title_letters[i].upper())
             if len(title_letters[i]) > 1:
                 fmt_string = _('Titles beginning with %s')
@@ -3384,7 +3384,7 @@ class CatalogBuilder:
         master_date_range_list = []
         today = datetime.datetime.now()
         today_time = datetime.datetime(today.year, today.month, today.day)
-        for (i, date) in enumerate(self.DATE_RANGE):
+        for i, date in enumerate(self.DATE_RANGE):
             if i:
                 date_range = '%d to %d days ago' % (self.DATE_RANGE[i - 1], self.DATE_RANGE[i])
             else:
@@ -3493,7 +3493,7 @@ class CatalogBuilder:
         master_date_range_list = []
         today = datetime.datetime.now()
         today_time = datetime.datetime(today.year, today.month, today.day)
-        for (i, date) in enumerate(self.DATE_RANGE):
+        for i, date in enumerate(self.DATE_RANGE):
             if i:
                 date_range = '%d to %d days ago' % (self.DATE_RANGE[i - 1], self.DATE_RANGE[i])
             else:
@@ -3816,7 +3816,7 @@ class CatalogBuilder:
         title_words = title_sort(title).split()
         translated = []
-        for (i, word) in enumerate(title_words):
+        for i, word in enumerate(title_words):
             # Leading numbers optionally translated to text equivalent
             # Capitalize leading sort word
             if i == 0:
@@ -3931,7 +3931,7 @@ class CatalogBuilder:
         self.update_progress_full_step(_('Thumbnails'))
         thumbs = ['thumbnail_default.jpg']
         image_dir = '%s/images' % self.catalog_path
-        for (i, title) in enumerate(self.books_by_title):
+        for i, title in enumerate(self.books_by_title):
             # Update status
             self.update_progress_micro_step('%s %d of %d' %
                                             (_('Thumbnail'), i, len(self.books_by_title)),


@@ -185,7 +185,7 @@ class CheckLibrary:
 return False
 def process_book(self, lib, book_info):
-(db_path, title_dir, book_id) = book_info
+db_path, title_dir, book_id = book_info
 filenames = frozenset(f for f in os.listdir(os.path.join(lib, db_path))
 if not self.ignore_name(f) and (
 os.path.splitext(f)[1] not in self.ignore_ext or


@@ -257,11 +257,11 @@ class CustomColumns:
 if ans is UNDEFINED_DATE:
 ans = None
 if data['datatype'] != 'series':
-return (ans, None)
+return ans, None
 ign,lt = self.custom_table_names(data['num'])
 extra = self.conn.get('''SELECT extra FROM %s
 WHERE book=?'''%lt, (idx,), all=False)
-return (ans, extra)
+return ans, extra
 # convenience methods for tag editing
 def get_custom_items_with_ids(self, label=None, num=None):
@@ -547,7 +547,7 @@ class CustomColumns:
 val = self.custom_data_adapters[data['datatype']](val, data)
 if data['datatype'] == 'series' and extra is None:
-(val, extra) = self._get_series_values(val)
+val, extra = self._get_series_values(val)
 if extra is None:
 extra = 1.0


@@ -937,8 +937,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 # reason.
 id_ = list(self.dirtied_cache.keys())[random.randint(0, l-1)]
 sequence = self.dirtied_cache[id_]
-return (id_, sequence)
-return (None, None)
+return id_, sequence
+return None, None
 def dirty_queue_length(self):
 return len(self.dirtied_cache)
@@ -979,7 +979,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 # This almost certainly means that the book has been deleted while
 # the backup operation sat in the queue.
 pass
-return (path, mi, sequence)
+return path, mi, sequence
 def get_metadata(self, idx, index_is_id=False, get_cover=False,
 get_user_categories=True, cover_as_data=False):
@@ -1896,7 +1896,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 item.rc += 1
 continue
 try:
-(item_id, sort_val) = tid_cat[val]  # let exceptions fly
+item_id, sort_val = tid_cat[val]  # let exceptions fly
 item = tcats_cat.get(val, None)
 if not item:
 item = tag_class(val, sort_val)
@@ -1918,7 +1918,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 tid_cat[val] = (val, val)
 for val in vals:
 try:
-(item_id, sort_val) = tid_cat[val]  # let exceptions fly
+item_id, sort_val = tid_cat[val]  # let exceptions fly
 item = tcats_cat.get(val, None)
 if not item:
 item = tag_class(val, sort_val)
@@ -3187,7 +3187,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 def set_series(self, id, series, notify=True, commit=True, allow_case_change=True):
 self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
-(series, idx) = self._get_series_values(series)
+series, idx = self._get_series_values(series)
 books_to_refresh = {id}
 if series:
 case_change = False
@@ -3545,8 +3545,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 paths = [duplicate[0] for duplicate in duplicates]
 formats = [duplicate[1] for duplicate in duplicates]
 metadata = [duplicate[2] for duplicate in duplicates]
-return (paths, formats, metadata), (ids if return_ids else
-len(ids))
+return (paths, formats, metadata), (ids if return_ids else len(ids))
 return None, (ids if return_ids else len(ids))
 def import_book(self, mi, formats, notify=True, import_hooks=True,


@@ -145,7 +145,7 @@ class Browser:
 def has_header(x: str) -> bool:
 x = x.lower()
-for (h, v) in headers:
+for h,v in headers:
 if h.lower() == x:
 return True
 return False


@@ -123,7 +123,7 @@ class DownloadRequest(QObject):
 'final_url': qurl_to_string(self.reply.url()), 'headers': []
 }
 h = result['headers']
-for (k, v) in self.reply.rawHeaderPairs():
+for k,v in self.reply.rawHeaderPairs():
 h.append((bytes(k).decode('utf-8', 'replace'), bytes(v).decode('utf-8', 'replace')))
 if code := self.reply.attribute(QNetworkRequest.Attribute.HttpStatusCodeAttribute):
 result['http_code'] = code
@@ -206,7 +206,7 @@ class FetchBackend(QNetworkAccessManager):
 timeout = req['timeout']
 rq.setTransferTimeout(int(timeout * 1000))
 rq.setRawHeader(b'User-Agent', self.current_user_agent().encode())
-for (name, val) in req['headers']:
+for name, val in req['headers']:
 ex = rq.rawHeader(name)
 if len(ex):
 val = bytes(ex).decode() + ', ' + val


@@ -77,7 +77,7 @@ def parse_multipart_byterange(buf, content_type):  # {{{
 if len(ret) != content_length:
 raise ValueError('Malformed sub-part, length of body not equal to length specified in Content-Range')
 buf.readline()
-return (start, ret)
+return start, ret
 while True:
 data = parse_part()
 if data is None:


@@ -250,7 +250,7 @@ def make_with_stats(filename, outfile):
 def run_batch(pairs):
-for (filename, outfile) in pairs:
+for filename, outfile in pairs:
 yield make_with_stats(filename, outfile)


@@ -288,7 +288,7 @@ class FontScanner(Thread):
 continue
 generic_family = panose_to_css_generic_family(faces[0]['panose'])
 if generic_family in allowed_families or generic_family == preferred_families[0]:
-return (family, faces)
+return family, faces
 elif generic_family not in found:
 found[generic_family] = (family, faces)


@@ -62,7 +62,7 @@ class LigatureSubstitution(UnknownLookupSubTable):
 def read_ligature(self, data):
 lig_glyph, count = data.unpack('HH')
 components = data.unpack('%dH'%(count-1), single_special=False)
-return (lig_glyph, components)
+return lig_glyph, components
 def all_substitutions(self, glyph_ids):
 gid_index_map = self.coverage.coverage_indices(glyph_ids)


@@ -99,7 +99,7 @@ def get_font_characteristics(raw, raw_is_table=False, return_all=False):
 offset = struct.calcsize(common_fields)
 panose = struct.unpack_from(b'>10B', os2_table, offset)
 offset += 10
-(range1, range2, range3, range4) = struct.unpack_from(b'>4L', os2_table, offset)
+range1, range2, range3, range4 = struct.unpack_from(b'>4L', os2_table, offset)
 offset += struct.calcsize(b'>4L')
 vendor_id = os2_table[offset:offset+4]
 vendor_id


@@ -1001,7 +1001,7 @@ return ``found_val``, otherwise return ``not_found_val``. If ``found_val`` and
 raise ValueError(_('{} requires 2 or 4 arguments').format(self.name))
 l = [v.strip() for v in val.split(',') if v.strip()]
-(id_, __, regexp) = ident.partition(':')
+id_, __, regexp = ident.partition(':')
 if not id_:
 return nfv
 for candidate in l:


@@ -435,7 +435,7 @@ class SearchQueryParser:
 raise ParseException(_('Recursive saved search: {0}').format(query))
 self.searches_seen.add(search_name_lower)
 query = self._get_saved_search_text(query)
-return (query, search_name_lower)
+return query, search_name_lower
 def _get_saved_search_text(self, query):
 try:


@@ -282,7 +282,7 @@ class SMTP:
 self.debug = debug_to
 self.esmtp_features = {}
 if host:
-(code, msg) = self.connect(host, port)
+code, msg = self.connect(host, port)
 if code != 220:
 raise SMTPConnectError(code, msg)
 if local_hostname is not None:
@@ -346,10 +346,10 @@ class SMTP:
 self.debug('connect:', (host, port))
 self._host = host
 self.sock = self._get_socket(host, port, self.timeout)
-(code, msg) = self.getreply()
+code, msg = self.getreply()
 if self.debuglevel > 0:
 self.debug('connect:', msg)
-return (code, msg)
+return code, msg
 def send(self, str):
 '''Send `str' to the server.'''
@@ -432,9 +432,9 @@ class SMTP:
 host.
 '''
 self.putcmd('helo', name or self.local_hostname)
-(code, msg) = self.getreply()
+code, msg = self.getreply()
 self.helo_resp = msg
-return (code, msg)
+return code, msg
 def ehlo(self, name=''):
 ''' SMTP 'ehlo' command.
@@ -443,7 +443,7 @@ class SMTP:
 '''
 self.esmtp_features = {}
 self.putcmd(self.ehlo_msg, name or self.local_hostname)
-(code, msg) = self.getreply()
+code, msg = self.getreply()
 # According to RFC1869 some (badly written)
 # MTA's will disconnect on an ehlo. Toss an exception if
 # that happens -ddm
@@ -452,7 +452,7 @@ class SMTP:
 raise SMTPServerDisconnected('Server not connected')
 self.ehlo_resp = msg
 if code != 250:
-return (code, msg)
+return code, msg
 self.does_esmtp = 1
 # parse the ehlo response -ddm
 resp = self.ehlo_resp.split('\n')
@@ -484,7 +484,7 @@ class SMTP:
 + ' ' + params
 else:
 self.esmtp_features[feature] = params
-return (code, msg)
+return code, msg
 def has_extn(self, opt):
 '''Does the server support a given SMTP service extension?'''
@@ -529,7 +529,7 @@ class SMTP:
 response code received when the all data is sent.
 '''
 self.putcmd('data')
-(code, repl) = self.getreply()
+code, repl = self.getreply()
 if self.debuglevel > 0:
 self.debug('data:', (code, repl))
 if code != 354:
@@ -540,10 +540,10 @@ class SMTP:
 q = q + CRLF
 q = q + '.' + CRLF
 self.send(q)
-(code, msg) = self.getreply()
+code, msg = self.getreply()
 if self.debuglevel > 0:
 self.debug('data:', (code, msg))
-return (code, msg)
+return code, msg
 def verify(self, address):
 '''SMTP 'verify' command -- checks for address validity.'''
@@ -572,7 +572,7 @@ class SMTP:
 '''
 if self.helo_resp is None and self.ehlo_resp is None:
 if not (200 <= self.ehlo()[0] <= 299):
-(code, resp) = self.helo()
+code, resp = self.helo()
 if not (200 <= code <= 299):
 raise SMTPHeloError(code, resp)
@@ -633,27 +633,27 @@ class SMTP:
 break
 if authmethod == AUTH_CRAM_MD5:
-(code, resp) = self.docmd('AUTH', AUTH_CRAM_MD5)
+code, resp = self.docmd('AUTH', AUTH_CRAM_MD5)
 if code == 503:
 # 503 == 'Error: already authenticated'
-return (code, resp)
-(code, resp) = self.docmd(encode_cram_md5(resp, user, password))
+return code, resp
+code, resp = self.docmd(encode_cram_md5(resp, user, password))
 elif authmethod == AUTH_PLAIN:
-(code, resp) = self.docmd('AUTH',
+code, resp = self.docmd('AUTH',
 AUTH_PLAIN + ' ' + encode_plain(user, password))
 elif authmethod == AUTH_LOGIN:
-(code, resp) = self.docmd('AUTH',
+code, resp = self.docmd('AUTH',
 '{} {}'.format(AUTH_LOGIN, encode_base64(user, eol='')))
 if code != 334:
 raise SMTPAuthenticationError(code, resp)
-(code, resp) = self.docmd(encode_base64(password, eol=''))
+code, resp = self.docmd(encode_base64(password, eol=''))
 elif authmethod is None:
 raise SMTPException('No suitable authentication method found.')
 if code not in (235, 503):
 # 235 == 'Authentication successful'
 # 503 == 'Error: already authenticated'
 raise SMTPAuthenticationError(code, resp)
-return (code, resp)
+return code, resp
 def starttls(self, context=None):
 '''Puts the connection to the SMTP server into TLS mode.
@@ -675,7 +675,7 @@ class SMTP:
 self.ehlo_or_helo_if_needed()
 if not self.has_extn('starttls'):
 raise SMTPException('STARTTLS extension not supported by server.')
-(resp, reply) = self.docmd('STARTTLS')
+resp, reply = self.docmd('STARTTLS')
 if resp == 220:
 if not _have_ssl:
 raise RuntimeError('No SSL support included in this Python')
@@ -697,7 +697,7 @@ class SMTP:
 # 501 Syntax error (no parameters allowed)
 # 454 TLS not available due to temporary reason
 raise SMTPResponseException(resp, reply)
-return (resp, reply)
+return resp, reply
 def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
 rcpt_options=[]):
@@ -766,7 +766,7 @@ class SMTP:
 for option in mail_options:
 esmtp_opts.append(option)
-(code, resp) = self.mail(from_addr, esmtp_opts)
+code, resp = self.mail(from_addr, esmtp_opts)
 if code != 250:
 self.rset()
 raise SMTPSenderRefused(code, resp, from_addr)
@@ -774,14 +774,14 @@ class SMTP:
 if isinstance(to_addrs, string_or_bytes):
 to_addrs = [to_addrs]
 for each in to_addrs:
-(code, resp) = self.rcpt(each, rcpt_options)
+code, resp = self.rcpt(each, rcpt_options)
 if (code != 250) and (code != 251):
 senderrs[each] = (code, resp)
 if len(senderrs) == len(to_addrs):
 # the server refused all our recipients
 self.rset()
 raise SMTPRecipientsRefused(senderrs)
-(code, resp) = self.data(msg)
+code, resp = self.data(msg)
 if code != 250:
 self.rset()
 raise SMTPDataError(code, resp)
@@ -885,10 +885,10 @@ class LMTP(SMTP):
 self.sock.close()
 self.sock = None
 raise
-(code, msg) = self.getreply()
+code, msg = self.getreply()
 if self.debuglevel > 0:
 self.debug('connect:', msg)
-return (code, msg)
+return code, msg
 # Test the sendmail method, which tests most of the others.


@@ -1623,7 +1623,7 @@ class PyZipFile(ZipFile):
 archivename = os.path.split(fname)[1]
 if basename:
 archivename = f'{basename}/{archivename}'
-return (fname, archivename)
+return fname, archivename
 def extractall(source, dest):


@@ -326,7 +326,7 @@ class FeedCollection(list):
 for j, f in enumerate(self):
 for i, a in enumerate(f):
 if a is article:
-return (j, i)
+return j, i
 def restore_duplicates(self):
 temp = []


@@ -211,7 +211,7 @@ class RecipeModel(QAbstractItemModel, AdaptSQP):
 script_ids = []
 for urn, title_script in iteritems(script_urn_map):
 id_ = int(urn[len('custom:'):])
-(title, script) = title_script
+title, script = title_script
 script_ids.append((id_, title, script))
 update_custom_recipes(script_ids)


@@ -69,7 +69,7 @@ class LoadParser(handler.ContentHandler):
 self.data = []
 # Create the element
 attrdict = {}
-for (att,value) in attrs.items():
+for att,value in attrs.items():
 attrdict[att] = value
 try:
 e = Element(qname=tag, qattributes=attrdict, check_grammar=False)