more string concatenation (extra-edit)

This commit is contained in:
un-pogaz 2025-01-24 11:14:25 +01:00
parent 8810c94933
commit f20aa37527
67 changed files with 172 additions and 241 deletions

View File

@ -358,9 +358,9 @@ class Economist(BasicNewsRecipe):
cleanup_html_article(root)
if '/interactive/' in url:
return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
+ 'This article is supposed to be read in a browser' \
+ '</em></article></body></html>'
return ('<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser.'
'</em></article></body></html>')
for div in root.xpath('//div[@class="lazy-image"]'):
noscript = list(div.iter('noscript'))

View File

@ -85,9 +85,10 @@ class ADRecipe(BasicNewsRecipe):
def print_version(self, url):
parts = url.split('/')
print_url = 'http://' + parts[2] + '/' + parts[3] + '/' + parts[4] + '/' + parts[5] + '/' \
+ parts[10] + '/' + parts[7] + '/print/' + \
parts[8] + '/' + parts[9] + '/' + parts[13]
print_url = 'http://' + '/'.join([
parts[2], parts[3], parts[4], parts[5], parts[10],
parts[7], 'print', parts[8], parts[9], parts[13],
])
return print_url

View File

@ -203,11 +203,10 @@ class AlMonitor(BasicNewsRecipe):
return self.tag_to_string(n).strip()
def _dbg_soup_node(self, node):
s = ' cls: ' + str(node.get('class')).strip() + \
' id: ' + str(node.get('id')).strip() + \
' role: ' + str(node.get('role')).strip() + \
' txt: ' + self.text(node)
return s
return (' cls: ' + str(node.get('class')).strip() +
' id: ' + str(node.get('id')).strip() +
' role: ' + str(node.get('role')).strip() +
' txt: ' + self.text(node))
def _p(self, msg):
curframe = inspect.currentframe()

View File

@ -49,8 +49,7 @@ class BillOReilly(BasicNewsRecipe):
continue
if url.startswith('/'):
url = 'http://www.billoreilly.com' + url + \
'&dest=/pg/jsp/community/tvshowprint.jsp'
url = 'http://www.billoreilly.com' + url + '&dest=/pg/jsp/community/tvshowprint.jsp'
self.log('\t\tFound article:', title)
self.log('\t\t\t', url)

View File

@ -41,21 +41,20 @@ class brewiarz(BasicNewsRecipe):
url_date_weekday = url_date.strftime('%A')
url_date_weekday_pl = weekday_dict[url_date_weekday]
url = 'http://brewiarz.pl/' + url_date_month_roman + '_' + \
url_date_year + '/' + url_date_day + url_date_month + '/index.php3'
url = ('http://brewiarz.pl/' + url_date_month_roman + '_' +
url_date_year + '/' + url_date_day + url_date_month + '/index.php3')
articles = self.parse_pages(url)
if articles:
title = url_date_weekday_pl + ' ' + url_date_day + \
'.' + url_date_month + '.' + url_date_year
title = (url_date_weekday_pl + ' ' + url_date_day +
'.' + url_date_month + '.' + url_date_year)
feeds.append((title, articles))
else:
sectors = self.get_sectors(url)
for subpage in sectors:
title = url_date_weekday_pl + ' ' + url_date_day + '.' + \
url_date_month + '.' + url_date_year + ' - ' + subpage.string
url = 'http://brewiarz.pl/' + url_date_month_roman + '_' + url_date_year + \
'/' + url_date_day + url_date_month + \
'/' + subpage['href']
title = (url_date_weekday_pl + ' ' + url_date_day + '.' +
url_date_month + '.' + url_date_year + ' - ' + subpage.string)
url = ('http://brewiarz.pl/' + url_date_month_roman + '_' + url_date_year +
'/' + url_date_day + url_date_month + '/' + subpage['href'])
print(url)
articles = self.parse_pages(url)
if articles:

View File

@ -154,8 +154,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -213,6 +213,6 @@ class CaravanMagazine(BasicNewsRecipe):
for x in art_cont['premiumContent']:
premium_cont += '\n' + ''.join(parse_body(x))
return '<html><body><div>' \
+ cat + title + desc + auth + lede + free_cont + premium_cont + \
'</div></body></html>'
return ('<html><body><div>'
+ cat + title + desc + auth + lede + free_cont + premium_cont +
'</div></body></html>')

View File

@ -5,9 +5,9 @@ from calibre.web.feeds.news import BasicNewsRecipe
class CATOInstitute(BasicNewsRecipe):
title = u'The CATO Institute'
description = 'The Cato Institute is a public policy research organization — a think tank — \
dedicated to the principles of individual liberty, limited government, free markets and peace.\
Its scholars and analysts conduct independent, nonpartisan research on a wide range of policy issues.'
description = ('The Cato Institute is a public policy research organization — a think tank — '
'dedicated to the principles of individual liberty, limited government, free markets and peace. '
'Its scholars and analysts conduct independent, nonpartisan research on a wide range of policy issues.')
__author__ = '_reader'
__date__ = '05 July 2012'
__version__ = '1.0'

View File

@ -2,8 +2,8 @@
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>\
2015, Rémi Vanicat <vanicat at debian.org'
__copyright__ = ('2009, Mathieu Godlewski <mathieu at godlewski.fr>, '
'2015, Rémi Vanicat <vanicat at debian.org')
'''
Courrier International
'''

View File

@ -36,9 +36,8 @@ class DeGentenaarOnline(BasicNewsRecipe):
'--comment', description, '--category', category, '--publisher', publisher
]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + \
category + \
'"\noverride_css=" p {text-indent: 0cm; margin-top: 0em; margin-bottom: 0.5em} "'
html2epub_options = ('publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category +
'"\noverride_css=" p {text-indent: 0cm; margin-top: 0em; margin-bottom: 0.5em} "')
keep_only_tags = [dict(name='span', attrs={
'id': ['lblArticleTitle', 'lblArticleIntroduction', 'lblArticleMainText']})]

View File

@ -104,8 +104,7 @@ class DiscoverMagazine(BasicNewsRecipe):
if cover is not None:
img = cover.find('img', src=True)
if img is not None:
self.cover_url = 'http://www.discovermagazine.com' + \
img['src'].replace(' ', '%20') # [:-7]
self.cover_url = 'http://www.discovermagazine.com' + img['src'].replace(' ', '%20') # [:-7]
# parse articles
for tag in col.findAll(name=['h3', 'div'], attrs={'class': ['bottomBorder', 'headline']}):
if tag.name == 'h3':

View File

@ -52,16 +52,14 @@ class DziennikWschodni(BasicNewsRecipe):
self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + \
soup.find(id='cover').find(name='img')['src']
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class': 'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + \
soup.find(attrs={'class': 'photoNavigationNext'})['href'][:-1]
baseurl = self.INDEX + soup.find(attrs={'class': 'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class': 'photoNavigation'}):
r.extract()

View File

@ -409,9 +409,9 @@ class Economist(BasicNewsRecipe):
load_article_from_json(raw, root)
if '/interactive/' in url:
return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
+ 'This article is supposed to be read in a browser' \
+ '</em></article></body></html>'
return ('<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser.'
'</em></article></body></html>')
for div in root.xpath('//div[@class="lazy-image"]'):
noscript = list(div.iter('noscript'))
@ -555,12 +555,9 @@ class Economist(BasicNewsRecipe):
# open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
root_ = parse(raw)
if '/interactive/' in url:
return (
'<html><body><article><h1>'
+ root_.xpath('//h1')[0].text + '</h1><em>'
+ 'This article is supposed to be read in a browser'
+ '</em></article></body></html>'
)
return ('<html><body><article><h1>' + root_.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser'
'</em></article></body></html>')
script = root_.xpath('//script[@id="__NEXT_DATA__"]')

View File

@ -409,9 +409,9 @@ class Economist(BasicNewsRecipe):
load_article_from_json(raw, root)
if '/interactive/' in url:
return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
+ 'This article is supposed to be read in a browser' \
+ '</em></article></body></html>'
return ('<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser.'
'</em></article></body></html>')
for div in root.xpath('//div[@class="lazy-image"]'):
noscript = list(div.iter('noscript'))
@ -555,12 +555,9 @@ class Economist(BasicNewsRecipe):
# open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
root_ = parse(raw)
if '/interactive/' in url:
return (
'<html><body><article><h1>'
+ root_.xpath('//h1')[0].text + '</h1><em>'
+ 'This article is supposed to be read in a browser'
+ '</em></article></body></html>'
)
return ('<html><body><article><h1>' + root_.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser'
'</em></article></body></html>')
script = root_.xpath('//script[@id="__NEXT_DATA__"]')

View File

@ -276,9 +276,9 @@ class EconomistNews(BasicNewsRecipe):
load_article_from_json(raw, root)
if '/interactive/' in url:
return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
+ 'This article is supposed to be read in a browser' \
+ '</em></article></body></html>'
return ('<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser.'
'</em></article></body></html>')
for div in root.xpath('//div[@class="lazy-image"]'):
noscript = list(div.iter('noscript'))

View File

@ -169,12 +169,9 @@ class econ_search(BasicNewsRecipe):
# open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
root_ = parse(raw)
if '/interactive/' in url:
return (
'<html><body><article><h1>'
+ root_.xpath('//h1')[0].text + '</h1><em>'
+ 'This article is supposed to be read in a browser'
+ '</em></article></body></html>'
)
return ('<html><body><article><h1>' + root_.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser.'
'</em></article></body></html>')
script = root_.xpath('//script[@id="__NEXT_DATA__"]')

View File

@ -245,12 +245,9 @@ class EconomistWorld(BasicNewsRecipe):
# open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
root_ = parse(raw)
if '/interactive/' in url:
return (
'<html><body><article><h1>'
+ root_.xpath('//h1')[0].text + '</h1><em>'
+ 'This article is supposed to be read in a browser'
+ '</em></article></body></html>'
)
return ('<html><body><article><h1>' + root_.xpath('//h1')[0].text + '</h1><em>'
'This article is supposed to be read in a browser.'
'</em></article></body></html>')
script = root_.xpath('//script[@id="__NEXT_DATA__"]')

View File

@ -154,8 +154,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -85,8 +85,7 @@ class ElMundo(BasicNewsRecipe):
year = str(st.tm_year)
month = '%.2d' % st.tm_mon
day = '%.2d' % st.tm_mday
cover = 'http://img.kiosko.net/' + year + '/' + \
month + '/' + day + '/es/elmundo.750.jpg'
cover = 'http://img.kiosko.net/' + '/'.join([year, month, day]) + '/es/elmundo.750.jpg'
try:
self.browser.open(cover)
except:

View File

@ -36,8 +36,8 @@ class ElPeriodico_cat(BasicNewsRecipe):
'--comment', description, '--category', category, '--publisher', publisher
]
html2epub_options = 'publisher="' + publisher + \
'"\ncomments="' + description + '"\ntags="' + category + '"'
html2epub_options = ('publisher="' + publisher +
'"\ncomments="' + description + '"\ntags="' + category + '"')
feeds = [(u'Portada', u'http://www.elperiodico.com/es/rss/rss_portada.xml'),
(u'Internacional', u'http://elperiodico.com/es/rss/internacional/rss.xml'),

View File

@ -53,8 +53,7 @@ class Esensja(BasicNewsRecipe):
month = a['href'].split('/')[1]
self.HREF = 'http://www.esensja.pl/magazyn/' + year + '/' + month + '/iso/'
soup = self.index_to_soup(self.HREF + '01.html')
self.cover_url = 'http://www.esensja.pl/magazyn/' + \
year + '/' + month + '/img/ilustr/cover_b.jpg'
self.cover_url = 'http://www.esensja.pl/magazyn/' + year + '/' + month + '/img/ilustr/cover_b.jpg'
feeds = []
chapter = ''
subchapter = ''

View File

@ -51,9 +51,9 @@ class EsensjaRSS(BasicNewsRecipe):
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX)
cover = soup.find(id='panel_1')
self.cover_url = self.INDEX + \
cover.find('a')['href'].replace(
'index.html', '') + 'img/ilustr/cover_b.jpg'
self.cover_url = (self.INDEX
+ cover.find('a')['href'].replace('index.html', '')
+ 'img/ilustr/cover_b.jpg')
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):

View File

@ -108,8 +108,7 @@ class expansion_spanish(BasicNewsRecipe):
year = str(st.tm_year)
month = '%.2d' % st.tm_mon
day = '%.2d' % st.tm_mday
cover = 'http://img5.kiosko.net/' + year + '/' + \
month + '/' + day + '/es/expansion.750.jpg'
cover = 'http://img5.kiosko.net/' + '/'.join([year, month, day]) + '/es/expansion.750.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -2,8 +2,8 @@
from __future__ import unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2011, Piotr Kontek, piotr.kontek@gmail.com \
2013-2018, Tomasz Długosz, tomek3d@gmail.com'
__copyright__ = ('2011, Piotr Kontek, piotr.kontek@gmail.com '
'2013-2018, Tomasz Długosz, tomek3d@gmail.com')
import re
import time

View File

@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2011, Piotr Kontek, piotr.kontek@gmail.com \
2013-2018, Tomasz Długosz, tomek3d@gmail.com'
__copyright__ = ('2011, Piotr Kontek, piotr.kontek@gmail.com '
'2013-2018, Tomasz Długosz, tomek3d@gmail.com')
import re

View File

@ -33,8 +33,7 @@ class GryOnlinePl(BasicNewsRecipe):
url_part = soup.find('link', attrs={'rel': 'canonical'})['href']
url_part = url_part[25:].rpartition('?')[0]
for nexturl in nexturls[1:-1]:
finalurl = 'http://www.gry-online.pl/' + \
url_part + nexturl['href']
finalurl = 'http://www.gry-online.pl/' + url_part + nexturl['href']
for i in range(10):
try:
soup2 = self.index_to_soup(finalurl)
@ -69,8 +68,7 @@ class GryOnlinePl(BasicNewsRecipe):
break
else:
nexturl = tag.a
finalurl = 'http://www.gry-online.pl/' + \
url_part + nexturl['href']
finalurl = 'http://www.gry-online.pl/' + url_part + nexturl['href']
for i in range(10):
try:
soup2 = self.index_to_soup(finalurl)

View File

@ -71,9 +71,7 @@ class HNWithCommentsLink(BasicNewsRecipe):
for td in main.findAll('td', 'default'):
comhead = td.find('span', 'comhead')
if comhead:
com_title = u'<h4>' + \
self.tag_to_string(comhead).replace(
' | link', '') + u'</h4>'
com_title = u'<h4>' + self.tag_to_string(comhead).replace(' | link', '') + u'</h4>'
comhead.parent.extract()
br = td.find('br')
if br:
@ -86,8 +84,8 @@ class HNWithCommentsLink(BasicNewsRecipe):
td['style'] = 'padding-left: ' + str(indent_width) + 'px'
comments = comments + com_title + td.prettify()
body = u'<h3>' + title + u'</h3><p><a href="' + link + u'">' + readable_link + \
u'</a><br/><strong>' + subtext + u'</strong></p>' + title_content + u'<br/>'
body = (u'<h3>' + title + u'</h3><p><a href="' + link + u'">' + readable_link +
u'</a><br/><strong>' + subtext + u'</strong></p>' + title_content + u'<br/>')
body = body + comments
return u'<html><title>' + title + u'</title><body>' + body + '</body></html>'
@ -114,8 +112,7 @@ class HNWithCommentsLink(BasicNewsRecipe):
else:
content = self.get_readable_content(url)
# content = re.sub(r'</body>\s*</html>\s*$', '', content) + \
# article.summary + '</body></html>'
# content = re.sub(r'</body>\s*</html>\s*$', '', content) + article.summary + '</body></html>'
if not isinstance(content, bytes):
content = content.encode('utf-8')

View File

@ -70,15 +70,12 @@ class HistoryToday(BasicNewsRecipe):
if len(subarticle) < 2:
continue
title = self.tag_to_string(subarticle[0])
originalurl = 'https://www.historytoday.com' + \
subarticle[0].span.a['href'].strip()
originalurl = 'https://www.historytoday.com' + subarticle[0].span.a['href'].strip()
originalpage = self.index_to_soup(originalurl)
printurl = originalpage.find(
'div', attrs={'id': 'ht-tools'}).a['href'].strip()
printurl = originalpage.find('div', attrs={'id': 'ht-tools'}).a['href'].strip()
url = 'https://www.historytoday.com' + printurl
desc = self.tag_to_string(subarticle[1])
articles.append({'title': title, 'url': url,
'description': desc, 'date': ''})
articles.append({'title': title, 'url': url,'description': desc, 'date': ''})
if articles:
if section_title not in feeds:

View File

@ -40,8 +40,8 @@ class infzm(BasicNewsRecipe):
def get_obfuscated_article(self, url):
br = self.get_browser()
link = url
res_link = link.replace('https://www.infzm.com', 'https://api.infzm.com/mobile') \
+ '?platform=wap&version=1.89.0&machine_id=35458aa29603f2b246636e5492122b50&user_id=&token=&member_type='
res_link = (link.replace('https://www.infzm.com', 'https://api.infzm.com/mobile') +
'?platform=wap&version=1.89.0&machine_id=35458aa29603f2b246636e5492122b50&user_id=&token=&member_type=')
# if article is paywalled, add code to figure out machine_id
raw = br.open(res_link).read()
html = json_to_html(raw, link)

View File

@ -45,8 +45,7 @@ class IlMessaggero(BasicNewsRecipe):
year = str(st.tm_year)
month = '%.2d' % st.tm_mon
day = '%.2d' % st.tm_mday
cover = 'http://carta.ilmessaggero.it/' + year + \
month + day + '/jpeg/MSGR_20_CITTA_1.jpg'
cover = 'http://carta.ilmessaggero.it/' + year + month + day + '/jpeg/MSGR_20_CITTA_1.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -43,8 +43,7 @@ class IlManifesto(BasicNewsRecipe):
def get_cover_url(self):
self._set_manifesto_index()
url = MANIFESTO_BASEURL + \
'fileadmin/archivi/in_edicola/%sprimapagina.gif' % self.manifesto_datestr
url = MANIFESTO_BASEURL + 'fileadmin/archivi/in_edicola/%sprimapagina.gif' % self.manifesto_datestr
return url
def parse_index(self):

View File

@ -22,11 +22,11 @@ class TheIndependentNew(BasicNewsRecipe):
title = u'The Independent'
__author__ = 'Krittika Goyal'
description = 'The latest in UK News and World News from The \
Independent. Wide range of international and local news, sports \
news, commentary and opinion pieces.Independent News - Breaking news \
that matters. Your daily comprehensive news source - The \
Independent Newspaper'
description = ('The latest in UK News and World News from The '
'Independent. Wide range of international and local news, sports '
'news, commentary and opinion pieces.Independent News - Breaking news '
'that matters. Your daily comprehensive news source - The '
'Independent Newspaper')
publisher = 'The Independent'
oldest_article = 2.0
ignore_duplicate_articles = {'title', 'url'}

View File

@ -114,7 +114,6 @@ class IndiaToday(BasicNewsRecipe):
if 'image_caption' in data:
imagecap = '<div id="imgcap">' + data['image_caption'] + '</div>'
html = '<html><body>' + slug + '<h1>' + title + '</h1>\n' + desc + '<div id="author">'\
+ author + '<span> ' + city + ' UPDATED: ' + date + '</span></div>\n' + image + imagecap + body\
+ '</body></html>'
return html
return ('<html><body>' + slug + '<h1>' + title + '</h1>\n' + desc + '<div id="author">'
+ author + '<span> ' + city + ' UPDATED: ' + date + '</span></div>\n' + image + imagecap + body +
'</body></html>')

View File

@ -154,8 +154,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -45,8 +45,7 @@ class NRCNext(BasicNewsRecipe):
raise ValueError('Failed to login, check username and password')
epubraw = None
for today in (date.today(), date.today() - timedelta(days=1),):
url = 'http://digitaleeditie.nrc.nl/digitaleeditie/helekrant/epub/nn_%s.epub' \
% today.strftime('%Y%m%d')
url = 'http://digitaleeditie.nrc.nl/digitaleeditie/helekrant/epub/nn_%s.epub' % today.strftime('%Y%m%d')
self.log('Trying to download epub from:', url)
try:
response3 = br.open(url, timeout=60)

View File

@ -154,8 +154,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -128,8 +128,8 @@ class AdvancedUserRecipe1277129332(BasicNewsRecipe):
year = time.strftime('%Y')
month = time.strftime('%m')
day = time.strftime('%d')
cover = 'http://paper.people.com.cn/rmrb/images/' + year + '-' + \
month + '/' + day + '/01/rmrb' + year + month + day + '01_b.jpg'
cover = ('http://paper.people.com.cn/rmrb/images/' + year + '-'
+ month + '/' + day + '/01/rmrb' + year + month + day + '01_b.jpg')
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -25,8 +25,7 @@ class AdvancedUserRecipe(BasicNewsRecipe):
def print_version(self, url):
segments = url.split('/')
printURL = 'http://www.presseportal.de/print.htx?nr=' + \
'/'.join(segments[5:6]) + '&type=polizei'
printURL = 'http://www.presseportal.de/print.htx?nr=' + '/'.join(segments[5:6]) + '&type=polizei'
return printURL
feeds = [(u'Frimmerdorf', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-frimmersdorf&w=public_service'),

View File

@ -23,8 +23,7 @@ class PressePortalDE(BasicNewsRecipe):
description = u'Presseportal News Feed DE'
# add date to description so for dayly downloads you can find them easier
# ---- can be edit by user
description = description + ' fetched: ' + \
datetime.now().strftime('%Y-%m-%d') # %H:%M:%S")
description = description + ' fetched: ' + datetime.now().strftime('%Y-%m-%d') # %H:%M:%S")
# Who published the content?
publisher = u'Presseportal.de'
# What is the content of?

View File

@ -59,9 +59,9 @@ class Pocket(BasicNewsRecipe):
fail loudly if it's missing from the config.
'''
br = BasicNewsRecipe.get_browser(self,
user_agent='Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; \
en-us) AppleWebKit/533.19.4 (KHTML, like Gecko) \
Version/5.0.3 Safari/533.19.4')
user_agent='''Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-us)
AppleWebKit/533.19.4 (KHTML, like Gecko)
Version/5.0.3 Safari/533.19.4''')
if self.username is not None and self.password is not None:
br.open(self.legacy_login_url)
br.select_form(nr=0)
@ -155,8 +155,7 @@ class Pocket(BasicNewsRecipe):
'''
try:
from calibre.ebooks.covers import calibre_cover2
title = self.title if isinstance(self.title, type(u'')) else \
self.title.decode('utf-8', 'replace')
title = self.title if isinstance(self.title, type(u'')) else self.title.decode('utf-8', 'replace')
date = strftime(self.timefmt)
time = strftime('[%I:%M %p]')
img_data = calibre_cover2(title, date, time)

View File

@ -113,8 +113,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -113,8 +113,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -59,8 +59,7 @@ def load_article_from_json(raw, root):
# E(article, 'p', replace_entities(data['subHeadline']['text']), style='font-style: italic; color:#202020;')
for subh in data['subHeadline']['json']:
process_node(subh, article)
auth = ts_date(data['publishedDate']) + ' | ' + str(data.get('readingTime', '')) + ' min read | ' \
+ ', '.join([a['name'] for a in data['authors']])
auth = ts_date(data['publishedDate']) + ' | ' + str(data.get('readingTime', '')) + ' min read | ' + ', '.join([a['name'] for a in data['authors']])
E(article, 'p', auth, style='color: #202020; font-size:small;')
main_image_url = sub_img = ''
for l in data['images']:

View File

@ -28,8 +28,8 @@ class ScottHanselman(BasicNewsRecipe):
'--comment', description, '--category', category, '--publisher', publisher, '--author', author
]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + \
description + '"\ntags="' + category + '"\nauthors="' + author + '"'
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + \
'"\ntags="' + category + '"\nauthors="' + author + '"'
remove_tags = [
dict(name=['object', 'link']), dict(

View File

@ -5,8 +5,7 @@ from calibre.web.feeds.recipes import BasicNewsRecipe
class PhilippineDailyInquirer(BasicNewsRecipe):
title = 'The Philippine Daily Inquirer'
custom_title = 'The Philippine Daily Inquirer - ' + \
time.strftime('%d %b %Y %I:%M %p')
custom_title = 'The Philippine Daily Inquirer - ' + time.strftime('%d %b %Y %I:%M %p')
__author__ = 'jde'
__date__ = '03 June 2012'
__version__ = '1.0'

View File

@ -33,8 +33,8 @@ class TheEconomicTimes(BasicNewsRecipe):
yr = str(date.today().year)
mn = date.today().strftime('%m')
dy = date.today().strftime('%d')
cover = 'https://asset.harnscloud.com/PublicationData/ET/etbg/'\
+ yr + '/' + mn + '/' + dy + '/Page/' + dy + '_' + mn + '_' + yr + '_001_etbg.jpg'
cover = ('https://asset.harnscloud.com/PublicationData/ET/etbg/'
+ yr + '/' + mn + '/' + dy + '/Page/' + dy + '_' + mn + '_' + yr + '_001_etbg.jpg')
self.log('cover_url ', cover)
br = BasicNewsRecipe.get_browser(self)
try:

View File

@ -138,14 +138,14 @@ class TLS(BasicNewsRecipe):
i = data['article_data_leadimage']
if i.get('full_image'):
lede = '<br><img src="{}"><div class="figc">{}</div>'.format(
i['full_image'] + '?w600', i['imagecaption'] + ' <i>' \
+ i['imagecredit'] + '</i>'
i['full_image'] + '?w600',
i['imagecaption'] + ' <i>' + i['imagecredit'] + '</i>'
)
cont = self.index_to_soup('https://www.the-tls.co.uk/wp-json/wp/v2/tls_articles/' + data['ID'], raw=True)
c_data = json.loads(cont)
body = c_data['content']['rendered']
html = '<html><body><div>' \
+ label + title + desc + auth + lede + bks + body + \
'</div></body></html>'
html = ('<html><body><div>'
+ label + title + desc + auth + lede + bks + body +
'</div></body></html>')
return BeautifulSoup(html).prettify()

View File

@ -63,8 +63,8 @@ class toiprint(BasicNewsRecipe):
'''
def get_cover_url(self):
cover = 'https://asset.harnscloud.com/PublicationData/TOI/' + le + '/' \
+ date0 + '/Page/' + date_ + '_001_' + le + '.jpg'
cover = ('https://asset.harnscloud.com/PublicationData/TOI/'
+ le + '/' + date0 + '/Page/' + date_ + '_001_' + le + '.jpg')
self.log('cover_url ', cover)
return cover
@ -135,17 +135,18 @@ class toiprint(BasicNewsRecipe):
body += '<p class="auth"><i>' + x['ZoneText'] + '</i></p>'
elif x['TagName'] == 'Photographs':
pag = x['ZoneID'].split('_')[-4]
body += '<div><img src="{}"></div>'.format(img_index + '/Photographs/' + pag + '/' \
+ x['ZoneID'] + '.jpg&bucket=andre-toi-out&q=50')
body += '<div><img src="{}"></div>'.format(
'/'.join([img_index, 'Photographs', pag, x['ZoneID']]) + '.jpg&bucket=andre-toi-out&q=50'
)
elif x['TagName'] == 'ImageCaption':
body += '<div class="cap">' + x['ZoneText'] + '</div><p>'
elif x['TagName'] == 'Lead':
body += '<div class="lead"><p><i>' + x['ZoneText'] + '</i></p></div><p>'
elif 'ZoneText' in x:
body += '<p><i>' + x['ZoneText'] + '</i></p>'
return '<html><body><div>' \
+ body.replace('<br>', '<p>').replace('<br/>', '<p>').replace('&lt;br&gt;', '<p>').replace('\n', '<br>') \
+ '</div></body></html>'
return ('<html><body><div>'
+ body.replace('<br>', '<p>').replace('<br/>', '<p>').replace('&lt;br&gt;', '<p>').replace('\n', '<br>') +
'</div></body></html>')
def preprocess_html(self, soup):
h1 = soup.find('h1')

View File

@ -154,8 +154,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -155,8 +155,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -117,8 +117,7 @@ class TimesColonist(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -87,8 +87,8 @@ class TheWashingtonPost(BasicNewsRecipe):
author = ''
if 'credits' in data:
author = '<div><span class="auth">' + 'By ' + ', '.join(x['name'] for x in data['credits']['by']) \
+ '</span> | <span class="time">' + data['publish_date'][:-14] + '</span></div>'
author = ('<div><span class="auth">' + 'By ' + ', '.join(x['name'] for x in data['credits']['by']) +
'</span> | <span class="time">' + data['publish_date'][:-14] + '</span></div>')
body = ''
for x in data['content_elements']:

View File

@ -77,8 +77,8 @@ class wapoprint(BasicNewsRecipe):
author = ''
if 'credits' in data:
author = '<div><span class="auth">' + 'By ' + ', '.join(x['name'] for x in data['credits']['by']) \
+ '</span> | <span class="time">' + data['publish_date'][:-14] + '</span></div>'
author = ('<div><span class="auth">' + 'By ' + ', '.join(x['name'] for x in data['credits']['by']) +
'</span> | <span class="time">' + data['publish_date'][:-14] + '</span></div>')
body = ''
for x in data['content_elements']:

View File

@ -113,8 +113,7 @@ class CanWestPaper(BasicNewsRecipe):
except:
while daysback < 7:
cover = 'http://webmedia.newseum.org/newseum-multimedia/dfp/jpg' + \
str((date.today() - timedelta(days=daysback)).day) + \
'/lg/' + self.fp_tag + '.jpg'
str((date.today() - timedelta(days=daysback)).day) + '/lg/' + self.fp_tag + '.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)

View File

@ -281,12 +281,12 @@ def main():
elif command == 'info':
info(dev)
elif command == 'cp':
usage='usage: %prog cp [options] source destination\nCopy files to/from the device\n\n'+\
'One of source or destination must be a path on the device. \n\nDevice paths have the form\n'+\
'dev:mountpoint/my/path\n'+\
'where mountpoint is one of / or carda: or cardb:/\n\n'+\
'source must point to a file for which you have read permissions\n'+\
'destination must point to a file or folder for which you have write permissions'
usage=('usage: %prog cp [options] source destination\nCopy files to/from the device\n\n'
'One of source or destination must be a path on the device. \n\nDevice paths have the form\n'
'dev:mountpoint/my/path\n'
'where mountpoint is one of / or carda: or cardb:/\n\n'
'source must point to a file for which you have read permissions\n'
'destination must point to a file or folder for which you have write permissions')
parser = OptionParser(usage=usage)
parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Overwrite the destination file if it exists already.')

View File

@@ -133,10 +133,10 @@ class ControlError(ProtocolError):
def __str__(self):
if self.query and self.response:
return 'Got unexpected response:\n' + \
'query:\n'+str(self.query.query)+'\n'+\
'expected:\n'+str(self.query.response)+'\n' +\
'actual:\n'+str(self.response)
return ('Got unexpected response:\n'
'query:\n'+str(self.query.query)+'\n'
'expected:\n'+str(self.query.response)+'\n'
'actual:\n'+str(self.response))
if self.desc:
return self.desc
return 'Unknown control error occurred'

View File

@@ -53,9 +53,7 @@ class field:
def __repr__(self):
typ = {DWORD: 'unsigned int', 'QWORD': 'unsigned long long', BYTE: 'unsigned char', WORD: 'unsigned short'}.get(self._fmt, '')
return 'An ' + typ + ' stored in ' + \
str(struct.calcsize(self._fmt)) + \
' bytes starting at byte ' + str(self._start)
return 'An ' + typ + ' stored in ' + str(struct.calcsize(self._fmt)) + ' bytes starting at byte ' + str(self._start)
class versioned_field(field):
@@ -110,8 +108,8 @@ class fixed_stringfield:
obj.pack(val, start=self._start, fmt='<'+str(len(val))+'s')
def __repr__(self):
return 'A string of length ' + str(self._length) + \
' starting at byte ' + str(self._start)
return ('A string of length ' + str(self._length) +
' starting at byte ' + str(self._start))
class xml_attr_field:

View File

@@ -702,8 +702,7 @@ class Metadata:
res = cmeta['is_multiple']['list_to_ui'].join(res)
elif datatype == 'series' and series_with_index:
if self.get_extra(key) is not None:
res = res + \
' [%s]'%self.format_series_index(val=self.get_extra(key))
res = res + ' [%s]'%self.format_series_index(val=self.get_extra(key))
elif datatype == 'datetime':
res = format_date(res, cmeta['display'].get('date_format','dd MMM yyyy'))
elif datatype == 'bool':

View File

@@ -17,8 +17,7 @@ from calibre.ebooks.metadata.book.base import Metadata
from polyglot.builtins import codepoint_to_chr
from polyglot.urllib import parse_qs, quote_plus
URL = \
'http://ww2.kdl.org/libcat/WhatsNext.asp?AuthorLastName={0}&AuthorFirstName=&SeriesName=&BookTitle={1}&CategoryID=0&cmdSearch=Search&Search=1&grouping='
URL = 'http://ww2.kdl.org/libcat/WhatsNext.asp?AuthorLastName={0}&AuthorFirstName=&SeriesName=&BookTitle={1}&CategoryID=0&cmdSearch=Search&Search=1&grouping='
_ignore_starts = '\'"'+''.join(codepoint_to_chr(x) for x in list(range(0x2018, 0x201e))+[0x2032, 0x2033])

View File

@@ -1645,8 +1645,7 @@ def metadata_to_opf(mi, as_string=True, default_lang=None):
mi.uuid = str(uuid.uuid4())
if not mi.book_producer:
mi.book_producer = __appname__ + ' (%s) '%__version__ + \
'[https://calibre-ebook.com]'
mi.book_producer = __appname__ + ' (%s) '%__version__ + '[https://calibre-ebook.com]'
if not mi.languages:
lang = (get_lang().replace('_', '-').partition('-')[0] if default_lang

View File

@@ -328,8 +328,7 @@ Examples
'mi<tg<close_____<field\n'
) % (self.__marker, instruction, inner_field_string)
if sec_in_field:
inner_field_string = 'mi<mk<sec-fd-beg\n' + inner_field_string + \
'mi<mk<sec-fd-end\n'
inner_field_string = 'mi<mk<sec-fd-beg\n' + inner_field_string + 'mi<mk<sec-fd-end\n'
if par_in_field:
inner_field_string = 'mi<mk<par-in-fld\n' + inner_field_string
if len(self.__field_string) == 0:

View File

@@ -348,10 +348,8 @@ class ListTable:
Method no longer used.
'''
self.__list_table_final = 'mi<mk<listabbeg_\n'
self.__list_table_final += 'mi<tg<open______<list-table\n' + \
'mi<mk<listab-beg\n' + self.__list_table_final
self.__list_table_final += \
'mi<mk<listab-end\n' + 'mi<tg<close_____<list-table\n'
self.__list_table_final += 'mi<tg<open______<list-table\n' + 'mi<mk<listab-beg\n' + self.__list_table_final
self.__list_table_final += 'mi<mk<listab-end\n' + 'mi<tg<close_____<list-table\n'
self.__list_table_final += 'mi<mk<listabend_\n'
def __write_final_string(self):
@@ -372,8 +370,7 @@ class ListTable:
not_allow = ['list-id',]
id = 0
self.__list_table_final = 'mi<mk<listabbeg_\n'
self.__list_table_final += 'mi<tg<open______<list-table\n' + \
'mi<mk<listab-beg\n' + self.__list_table_final
self.__list_table_final += 'mi<tg<open______<list-table\n' + 'mi<mk<listab-beg\n' + self.__list_table_final
for list in self.__all_lists:
id += 1
self.__list_table_final += 'mi<tg<open-att__<list-in-table'
@@ -422,8 +419,7 @@ class ListTable:
# self.__list_table_final += '<bullet-type>%s' % (bullet_text)
self.__list_table_final += '\n'
self.__list_table_final += 'mi<tg<close_____<list-in-table\n'
self.__list_table_final += \
'mi<mk<listab-end\n' + 'mi<tg<close_____<list-table\n'
self.__list_table_final += 'mi<mk<listab-end\n' + 'mi<tg<close_____<list-table\n'
self.__list_table_final += 'mi<mk<listabend_\n'
def parse_list_table(self, line):

View File

@@ -180,8 +180,7 @@ class OverrideTable:
the attributes and values of the tag from the dictionary.
'''
self.__override_table_final = 'mi<mk<over_beg_\n'
self.__override_table_final += 'mi<tg<open______<override-table\n' + \
'mi<mk<overbeg__\n' + self.__override_table_final
self.__override_table_final += 'mi<tg<open______<override-table\n' + 'mi<mk<overbeg__\n' + self.__override_table_final
for the_dict in self.__override_list:
self.__override_table_final += 'mi<tg<empty-att_<override-list'
the_keys = the_dict.keys()
@@ -190,8 +189,7 @@ class OverrideTable:
f'<{the_key}>{the_dict[the_key]}'
self.__override_table_final += '\n'
self.__override_table_final += '\n'
self.__override_table_final += \
'mi<mk<overri-end\n' + 'mi<tg<close_____<override-table\n'
self.__override_table_final += 'mi<mk<overri-end\n' + 'mi<tg<close_____<override-table\n'
self.__override_table_final += 'mi<mk<overribend_\n'
def parse_override_table(self, line):

View File

@@ -232,10 +232,8 @@ cw<ci<font-style<nu<0
'''
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__font_table_final = 'mi<tg<open______<font-table\n' + \
'mi<mk<fonttb-beg\n' + self.__font_table_final
self.__font_table_final += \
'mi<mk<fonttb-end\n' + 'mi<tg<close_____<font-table\n'
self.__font_table_final = 'mi<tg<open______<font-table\n' + 'mi<mk<fonttb-beg\n' + self.__font_table_final
self.__font_table_final += 'mi<mk<fonttb-end\n' + 'mi<tg<close_____<font-table\n'
elif self.__token_info == 'ob<nu<open-brack':
if int(self.__ob_count) == int(self.__close_group_count) + 1:
self.__font_table_final += 'mi<mk<fontit-beg\n'
@@ -289,10 +287,8 @@ cw<ci<font-style<nu<0
def __color_table_func(self, line):
if int(self.__cb_count) == int(self.__close_group_count):
self.__state = 'preamble'
self.__color_table_final = 'mi<tg<open______<color-table\n' + \
'mi<mk<clrtbl-beg\n' + self.__color_table_final
self.__color_table_final += \
'mi<mk<clrtbl-end\n' + 'mi<tg<close_____<color-table\n'
self.__color_table_final = 'mi<tg<open______<color-table\n' + 'mi<mk<clrtbl-beg\n' + self.__color_table_final
self.__color_table_final += 'mi<mk<clrtbl-end\n' + 'mi<tg<close_____<color-table\n'
else:
self.__color_table_final += line
@@ -308,10 +304,8 @@ cw<ci<font-style<nu<0
'''
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__style_sheet_final = 'mi<tg<open______<style-table\n' + \
'mi<mk<styles-beg\n' + self.__style_sheet_final
self.__style_sheet_final += \
'mi<mk<styles-end\n' + 'mi<tg<close_____<style-table\n'
self.__style_sheet_final = 'mi<tg<open______<style-table\n' + 'mi<mk<styles-beg\n' + self.__style_sheet_final
self.__style_sheet_final += 'mi<mk<styles-end\n' + 'mi<tg<close_____<style-table\n'
elif self.__token_info == 'ob<nu<open-brack':
if int(self.__ob_count) == int(self.__close_group_count) + 1:
self.__style_sheet_final += 'mi<mk<stylei-beg\n'
@@ -369,10 +363,8 @@ cw<ci<font-style<nu<0
def __revision_table_func(self, line):
if int(self.__cb_count) == int(self.__close_group_count):
self.__state = 'preamble'
self.__revision_table_final = 'mi<tg<open______<revision-table\n' + \
'mi<mk<revtbl-beg\n' + self.__revision_table_final
self.__revision_table_final += \
'mi<mk<revtbl-end\n' + 'mi<tg<close_____<revision-table\n'
self.__revision_table_final = 'mi<tg<open______<revision-table\n' + 'mi<mk<revtbl-beg\n' + self.__revision_table_final
self.__revision_table_final += 'mi<mk<revtbl-end\n' + 'mi<tg<close_____<revision-table\n'
else:
self.__revision_table_final += line
@@ -385,10 +377,8 @@ cw<ci<font-style<nu<0
def __doc_info_func(self, line):
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__doc_info_table_final = 'mi<tg<open______<doc-information\n' + \
'mi<mk<doc-in-beg\n' + self.__doc_info_table_final
self.__doc_info_table_final += \
'mi<mk<doc-in-end\n' + 'mi<tg<close_____<doc-information\n'
self.__doc_info_table_final = 'mi<tg<open______<doc-information\n' + 'mi<mk<doc-in-beg\n' + self.__doc_info_table_final
self.__doc_info_table_final += 'mi<mk<doc-in-end\n' + 'mi<tg<close_____<doc-information\n'
elif self.__token_info == 'ob<nu<open-brack':
if int(self.__ob_count) == int(self.__close_group_count) + 1:
self.__doc_info_table_final += 'mi<mk<docinf-beg\n'

View File

@@ -720,9 +720,9 @@ class ProcessTokens:
numerator = float(re.search(r'[0-9.\-]+', numerator).group())
except TypeError:
if self.__run_level > 3:
msg = ('No number to process?\nthis indicates that the token \\(\\li\\) \
should have a number and does not\nnumerator is \
"%s"\ndenominator is "%s"\n') % (numerator, denominator)
msg = ('No number to process?\nthis indicates that the token \\(\\li\\) '
'should have a number and does not\nnumerator is '
'"%s"\ndenominator is "%s"\n') % (numerator, denominator)
raise self.__bug_handler(msg)
if 5 > self.__return_code:
self.__return_code = 5

View File

@@ -174,8 +174,8 @@ class Tokenize:
self.__cwdigit_exp = re.compile(r'(\\[a-zA-Z]+[\-0-9]+)([^0-9 \\]+)')
def tokenize(self):
'''Main class for handling other methods. Reads the file \
, uses method self.sub_reg to make basic substitutions,\
'''Main class for handling other methods. Reads the file,
uses method self.sub_reg to make basic substitutions,
and process tokens by itself'''
# read
with open_for_read(self.__file) as read_obj:

View File

@@ -556,11 +556,11 @@ class CatalogBuilder:
if author[0] == current_author[0]:
if self.opts.fmt == 'mobi':
# Exit if building MOBI
error_msg = _('<p>Inconsistent author sort values for author<br/>' +
f"'{author[0]}':</p>" +
f'<p><center><b>{author[1]}</b> != <b>{current_author[1]}</b></center></p>' +
'<p>Unable to build MOBI catalog.<br/>' +
f"Select all books by '{author[0]}', apply correct Author Sort value in Edit Metadata dialog, then rebuild the catalog.\n<p>") # noqa: E501
error_msg = _("<p>Inconsistent author sort values for author<br/>'{0}':</p>"
'<p><center><b>{1}</b> != <b>{2}</b></center></p>'
'<p>Unable to build MOBI catalog.<br/>'
"Select all books by '{0}', apply correct Author Sort value in Edit Metadata dialog, then rebuild the catalog.\n<p>"
).format(author[0], author[1], current_author[1])
self.opts.log.warn('\n*** Metadata error ***')
self.opts.log.warn(error_msg)