uniform string quote (extra-edit)

commit b5aca6ff5c
parent 37771022ce
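Every hunk below applies the same mechanical change: triple-double-quote docstring delimiters and double-quoted strings inside commented-out print statements become single-quoted, giving the tree a uniform quote style. As an illustration only (the commit does not say how the edit was produced, and the script below is hypothetical, not part of the commit), a rewrite of this kind could be sketched roughly like this:

import sys

def uniform_docstring_quotes(source: str) -> str:
    # Naive sketch: swap every """ delimiter for '''.
    # A real pass would also have to skip occurrences inside ordinary string literals.
    return source.replace('"""', "'''")

if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path, encoding='utf-8') as fh:
            text = fh.read()
        with open(path, 'w', encoding='utf-8') as fh:
            fh.write(uniform_docstring_quotes(text))

The actual normalization may equally have come from the project's lint tooling or from manual editing; the sketch only makes the pattern in the hunks explicit.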
@@ -21,7 +21,7 @@ class AlternativesEconomiques(BasicNewsRecipe):
     remove_images = False

     def get_cover_url(self):
-        """Récupère dynamiquement l'URL de la dernière une depuis MLP"""
+        '''Récupère dynamiquement l'URL de la dernière une depuis MLP'''
         br = self.get_browser()
         try:
             # Accéder à la page du magazine sur MLP
@@ -74,7 +74,7 @@ class Fokus(BasicNewsRecipe):
         return br

     def get_web_sections(self, main_url: str) -> dict[str, str]:
-        """Return a dict of (1) section URL and (2) section name key-value pairs found at `main_url`.
+        '''Return a dict of (1) section URL and (2) section name key-value pairs found at `main_url`.

         For example, if the Fokus website currently includes an 'Aktuellt' section, the dict should include an entry on
         the form: `{'https://www.fokus.se/aktuellt': 'Aktuellt'}`.
@@ -84,7 +84,7 @@ class Fokus(BasicNewsRecipe):

         Yields:
             dict[str, str]: (1) URLs and (2) human-readable names of Fokus sections.
-        """
+        '''
         self.log(f"Identifying all sections under '{main_url}'...")
         soup = self.index_to_soup(main_url)

@@ -110,14 +110,14 @@ class Fokus(BasicNewsRecipe):
         return section_urls_and_names

     def parse_article_blurb(self, article_blurb) -> dict[str, str, str, str] | None:
-        """Given a <article> tag of class 'Blurb', parse it into a dict.
+        '''Given a <article> tag of class 'Blurb', parse it into a dict.

         Args:
             article_blurb (Tag): An <article> tag hosting metadata and the URL of an article.

         Returns:
             dict[str, str, str, str]: A dict on a `{'url': str, 'title': str, 'description': str, 'date': str}` format.
-        """
+        '''
         if a_tag := article_blurb.find('a', href=True):
             url = a_tag['href'].strip().rstrip('/')
             if url.startswith('/'):
@@ -147,7 +147,7 @@ class Fokus(BasicNewsRecipe):
             return

     def _get_article_blurbs(self, soup) -> dict[str, dict[str, str, str, str]]:
-        """Given a Fokus webpage `soup`, return a dict of unique article entries found on the page.
+        '''Given a Fokus webpage `soup`, return a dict of unique article entries found on the page.

         The key of a given entry in the output dictionary is the article URL. The corresponding value is a dictionary
         on a `{'url': str, 'title': str, 'description': str, 'date': str}` format.
@@ -157,7 +157,7 @@ class Fokus(BasicNewsRecipe):

         Returns:
             dict[str, dict[str, str, str, str]]: A dict with article URLs as keys and 'article dicts' as values.
-        """
+        '''

         def _log(article) -> None:
             '''Log a digestible summary of the input `article` blurb.'''
@@ -187,7 +187,7 @@ class Fokus(BasicNewsRecipe):
         return parsed_blurbs

     def get_article_blurbs(self, sections: dict[str, str]) -> dict[str, dict[str, str, str, str]]:
-        """Create and return a dict of all unique article blurbs found in all `sections`.
+        '''Create and return a dict of all unique article blurbs found in all `sections`.

         The key of a given entry in the output dictionary is the article URL. The corresponding value is a dictionary
         on a `{'url': str, 'title': str, 'description': str, 'date': str}` format.
@@ -197,7 +197,7 @@ class Fokus(BasicNewsRecipe):

         Returns:
             dict[str, dict[str, str, str, str]]: A dict with article URLs as keys and 'article dicts' as values.
-        """
+        '''
         self.log(f'Identifying all articles under all {len(sections)} sections...')

         article_blurbs = {}
@@ -27,7 +27,7 @@ class IEEESpectrumMagazine(BasicNewsRecipe):
     ]

     def get_cover_url(self):
-        """Go to this month's URL and pull cover image from there."""
+        '''Go to this month's URL and pull cover image from there.'''
         month_url = 'https://spectrum.ieee.org/magazine/{}'.format(self.year_month)
         soup = self.index_to_soup(month_url)
         img_meta = soup.find('meta', property='og:image')
@@ -48,7 +48,7 @@ class LeCanardEnchaine(BasicNewsRecipe):
     '''

     def get_cover_url(self):
-        """Récupère dynamiquement l'URL de la dernière une"""
+        '''Récupère dynamiquement l'URL de la dernière une'''
         br = self.get_browser()
         try:
             soup = self.index_to_soup(br.open('https://boutique.lecanardenchaine.fr/acheter-au-numero/').read())
@@ -53,11 +53,11 @@ class Pocket(BasicNewsRecipe):
     articles = []

     def get_browser(self, *args, **kwargs):
-        """
+        '''
         We need to pretend to be a recent version of safari for the mac to
         prevent User-Agent checks Pocket api requires username and password so
         fail loudly if it's missing from the config.
-        """
+        '''
         br = BasicNewsRecipe.get_browser(self,
                 user_agent='Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; \
                         en-us) AppleWebKit/533.19.4 (KHTML, like Gecko) \
@@ -151,10 +151,10 @@ class Pocket(BasicNewsRecipe):
             pass

     def default_cover(self, cover_file):
-        """
+        '''
         Create a generic cover for recipes that don't have a cover
         This override adds time to the cover
-        """
+        '''
         try:
             from calibre.ebooks.covers import calibre_cover2
             title = self.title if isinstance(self.title, type(u'')) else \
@@ -206,7 +206,7 @@ class TimesColonist(BasicNewsRecipe):
         if atag is not None:
             url = atag['href']
             url = url.strip()
-            # print("Checking >>"+url+'<<\n\r')
+            # print('Checking >>'+url+'<<\n\r')
             if url.startswith('/'):
                 url = self.url_prefix + url
             if url in self.url_list:
@@ -608,12 +608,12 @@ class Build(Command):
                 os.remove(x)

     def check_call(self, *args, **kwargs):
-        """print cmdline if an error occurred
+        '''print cmdline if an error occurred

         If something is missing (cmake e.g.) you get a non-informative error
          self.check_call(qmc + [ext.name+'.pro'])
         so you would have to look at the source to see the actual command.
-        """
+        '''
         try:
             subprocess.check_call(*args, **kwargs)
         except:
@@ -359,7 +359,7 @@ class DevicePlugin(Plugin):
         raise NotImplementedError()

     def total_space(self, end_session=True):
-        """
+        '''
         Get total space available on the mountpoints:
             1. Main memory
             2. Memory Card A
@@ -368,11 +368,11 @@ class DevicePlugin(Plugin):
         :return: A 3 element list with total space in bytes of (1, 2, 3). If a
                  particular device doesn't have any of these locations it should return 0.

-        """
+        '''
         raise NotImplementedError()

     def free_space(self, end_session=True):
-        """
+        '''
         Get free space available on the mountpoints:
             1. Main memory
             2. Card A
@@ -381,11 +381,11 @@ class DevicePlugin(Plugin):
         :return: A 3 element list with free space in bytes of (1, 2, 3). If a
                  particular device doesn't have any of these locations it should return -1.

-        """
+        '''
         raise NotImplementedError()

     def books(self, oncard=None, end_session=True):
-        """
+        '''
         Return a list of e-books on the device.

         :param oncard: If 'carda' or 'cardb' return a list of e-books on the
@@ -395,7 +395,7 @@ class DevicePlugin(Plugin):

         :return: A BookList.

-        """
+        '''
         raise NotImplementedError()

     def upload_books(self, files, names, on_card=None, end_session=True,
@@ -16,7 +16,7 @@ class FastPageGenerator(IPageGenerator):
         raise Exception('Fast calculation impossible.')

     def _generate(self, mobi_file_path: str, real_count: int | None) -> Pages:
-        """
+        '''
         2300 characters of uncompressed text per page. This is
         not meant to map 1 to 1 to a print book but to be a
         close enough measure.
@@ -30,7 +30,7 @@ class FastPageGenerator(IPageGenerator):
         accessible in MOBI files (part of the header). Also,
         It's faster to work off of the length then to
         decompress and parse the actual text.
-        """
+        '''

         pages = []
         count = 0
@@ -61,11 +61,11 @@ class Bookmark(): # {{{
             bpl = bpar_offset + 4
             bpar_len, = unpack('>I', data[bpl:bpl+4])
             bpar_len += 8
-            # print "bpar_len: 0x%x" % bpar_len
+            # print 'bpar_len: 0x%x' % bpar_len
             eo = bpar_offset + bpar_len

             # Walk bookmark entries
-            # print " --- %s --- " % self.path
+            # print ' --- %s --- ' % self.path
             current_entry = 1
             sig = data[eo:eo+4]
             previous_block = None
@@ -80,7 +80,7 @@ class Bookmark(): # {{{
                     current_block = 'data_header'
                     # entry_type = "data_header"
                     location, = unpack('>I', data[eo+0x34:eo+0x38])
-                    # print "data_header location: %d" % location
+                    # print 'data_header location: %d' % location
                 else:
                     current_block = 'text_block'
                     if previous_block == 'empty_data':
@@ -125,7 +125,7 @@ class Bookmark(): # {{{
                 # be the same - cheat by nudging -1
                 # Skip bookmark for last_read_location
                 if end_loc != self.last_read:
-                    # print " adding Bookmark at 0x%x (%d)" % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1)
+                    # print ' adding Bookmark at 0x%x (%d)' % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1)
                     displayed_location = end_loc // MAGIC_MOBI_CONSTANT + 1
                     user_notes[end_loc - 1] = dict(id=self.id,
                         displayed_location=displayed_location,
@@ -321,7 +321,7 @@ class KOBO(USBMS):
                         playlist_map[lpath].append('Recommendation')

                 path = self.normalize_path(path)
-                # print "Normalized FileName: " + path
+                # print 'Normalized FileName: ' + path

                 idx = bl_cache.get(lpath, None)
                 if idx is not None:
@@ -332,7 +332,7 @@ class KOBO(USBMS):
                             # Try the Touch version if the image does not exist
                             imagename = self.normalize_path(self._main_prefix + KOBO_ROOT_DIR_NAME + '/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed')

-                        # print "Image name Normalized: " + imagename
+                        # print 'Image name Normalized: ' + imagename
                         if not os.path.exists(imagename):
                             debug_print('Strange - The image name does not exist - title: ', title)
                         if imagename is not None:
@@ -452,7 +452,7 @@ class KOBO(USBMS):
                     need_sync = True
                     del bl[idx]

-            # print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
+            # print 'count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
             # (len(bl_cache), len(bl), need_sync)
             if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
                 if oncard == 'cardb':
@@ -561,33 +561,33 @@ class KOBO(USBMS):
         for i, path in enumerate(paths):
             self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
             path = self.normalize_path(path)
-            # print "Delete file normalized path: " + path
+            # print 'Delete file normalized path: ' + path
             extension = os.path.splitext(path)[1]
             ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(path)

             ContentID = self.contentid_from_path(path, ContentType)

             ImageID = self.delete_via_sql(ContentID, ContentType)
-            # print " We would now delete the Images for" + ImageID
+            # print ' We would now delete the Images for' + ImageID
             self.delete_images(ImageID, path)

             if os.path.exists(path):
                 # Delete the ebook
-                # print "Delete the ebook: " + path
+                # print 'Delete the ebook: ' + path
                 os.unlink(path)

                 filepath = os.path.splitext(path)[0]
                 for ext in self.DELETE_EXTS:
                     if os.path.exists(filepath + ext):
-                        # print "Filename: " + filename
+                        # print 'Filename: ' + filename
                         os.unlink(filepath + ext)
                     if os.path.exists(path + ext):
-                        # print "Filename: " + filename
+                        # print 'Filename: ' + filename
                         os.unlink(path + ext)

                 if self.SUPPORTS_SUB_DIRS:
                     try:
-                        # print "removed"
+                        # print 'removed'
                         os.removedirs(os.path.dirname(path))
                     except Exception:
                         pass
@@ -601,9 +601,9 @@ class KOBO(USBMS):
             self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
             for bl in booklists:
                 for book in bl:
-                    # print "Book Path: " + book.path
+                    # print 'Book Path: ' + book.path
                     if path.endswith(book.path):
-                        # print " Remove: " + book.path
+                        # print ' Remove: ' + book.path
                         bl.remove_book(book)
         self.report_progress(1.0, _('Removing books from device metadata listing...'))

@@ -634,12 +634,12 @@ class KOBO(USBMS):
                 prints('in add_books_to_metadata. Prefix is None!', path,
                         self._main_prefix)
                 continue
-            # print "Add book to metadata: "
-            # print "prefix: " + prefix
+            # print 'Add book to metadata: '
+            # print 'prefix: ' + prefix
             lpath = path.partition(prefix)[2]
             if lpath.startswith('/') or lpath.startswith('\\'):
                 lpath = lpath[1:]
-            # print "path: " + lpath
+            # print 'path: ' + lpath
             book = self.book_class(prefix, lpath, info.title, other=info)
             if book.size is None or book.size == 0:
                 book.size = os.stat(self.normalize_path(path)).st_size
@@ -686,13 +686,13 @@ class KOBO(USBMS):
     def get_content_type_from_extension(self, extension):
         if extension == '.kobo':
             # Kobo books do not have book files. They do have some images though
-            # print "kobo book"
+            # print 'kobo book'
             ContentType = 6
         elif extension == '.pdf' or extension == '.epub':
-            # print "ePub or pdf"
+            # print 'ePub or pdf'
             ContentType = 16
         elif extension == '.rtf' or extension == '.txt' or extension == '.htm' or extension == '.html':
-            # print "txt"
+            # print 'txt'
             if self.fwversion == (1,0) or self.fwversion == (1,4) or self.fwversion == (1,7,4):
                 ContentType = 999
             else:
@@ -708,14 +708,14 @@ class KOBO(USBMS):
             print('path from_contentid cardb')
         elif oncard == 'carda':
             path = path.replace('file:///mnt/sd/', self._card_a_prefix)
-            # print "SD Card: " + path
+            # print 'SD Card: ' + path
         else:
             if ContentType == '6' and MimeType == 'Shortcover':
                 # This is a hack as the kobo files do not exist
                 # but the path is required to make a unique id
                 # for calibre's reference
                 path = self._main_prefix + path + '.kobo'
-                # print "Path: " + path
+                # print 'Path: ' + path
             elif (ContentType == '6' or ContentType == '10') and (
                 MimeType == 'application/x-kobo-epub+zip' or (
                     MimeType == 'application/epub+zip' and self.isTolinoDevice())
@@ -724,12 +724,12 @@ class KOBO(USBMS):
                     path = self._main_prefix + path.replace('file:///mnt/onboard/', '')
                 else:
                     path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
-                # print "Internal: " + path
+                # print 'Internal: ' + path
             else:
-                # if path.startswith("file:///mnt/onboard/"):
+                # if path.startswith('file:///mnt/onboard/'):
                 path = path.replace('file:///mnt/onboard/', self._main_prefix)
                 path = path.replace('/mnt/onboard/', self._main_prefix)
-                # print "Internal: " + path
+                # print 'Internal: ' + path

         return path

@@ -1820,7 +1820,7 @@ class KOBOTOUCH(KOBO):
             debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)

             path = self.normalize_path(path)
-            # print "Normalized FileName: " + path
+            # print 'Normalized FileName: ' + path

             # Collect the Kobo metadata
             authors_list = [a.strip() for a in authors.split('&')] if authors is not None else [_('Unknown')]
@@ -2144,7 +2144,7 @@ class KOBOTOUCH(KOBO):
                 else:
                     debug_print("KoboTouch:books - Book in mtadata.calibre, on file system but not database - bl[idx].title:'%s'"%bl[idx].title)

-            # print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
+            # print 'count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
             # (len(bl_cache), len(bl), need_sync)
             # Bypassing the KOBO sync_booklists as that does things we don't need to do
             # Also forcing sync to see if this solves issues with updating shelves and matching books.
@@ -2204,11 +2204,11 @@ class KOBOTOUCH(KOBO):
                 else:
                     path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
             else: # Should never get here, but, just in case...
-                # if path.startswith("file:///mnt/onboard/"):
+                # if path.startswith('file:///mnt/onboard/'):
                 path = path.replace('file:///mnt/onboard/', self._main_prefix)
                 path = path.replace('file:///mnt/sd/', self._card_a_prefix)
                 path = path.replace('/mnt/onboard/', self._main_prefix)
-                # print "Internal: " + path
+                # print 'Internal: ' + path

         return path

@@ -141,7 +141,7 @@ class CHMReader(CHMFile):
                 else:
                     frag = None
                 name = self._deentity(li.object('param', {'name': 'Name'})[0]['value'])
-                # print "========>", name
+                # print '========>', name
                 toc.add_item(href, frag, name, play_order=self._playorder)
                 self._playorder += 1
                 if li.ul:
@@ -127,10 +127,10 @@ class CHMInput(InputFormatPlugin):
                 strip_encoding_pats=True, resolve_entities=True)[0]
             hhcroot = html.fromstring(hhcdata)
             toc = self._process_nodes(hhcroot)
-            # print("=============================")
-            # print("Printing hhcroot")
+            # print('=============================')
+            # print('Printing hhcroot')
             # print(etree.tostring(hhcroot, pretty_print=True))
-            # print("=============================")
+            # print('=============================')
             log.debug('Found %d section nodes' % toc.count())
             htmlpath = os.path.splitext(hhcpath)[0] + '.html'
             base = os.path.dirname(os.path.abspath(htmlpath))
@@ -143,20 +143,20 @@ class DocAnalysis:
         maxLineLength=1900 # Discard larger than this to stay in range
         buckets=20 # Each line is divided into a bucket based on length

-        # print("there are "+str(len(lines))+" lines")
+        # print('there are '+str(len(lines))+' lines')
         # max = 0
         # for line in self.lines:
         # l = len(line)
         # if l > max:
         # max = l
-        # print("max line found is "+str(max))
+        # print('max line found is '+str(max))
         # Build the line length histogram
         hRaw = [0 for i in range(0,buckets)]
         for line in self.lines:
             l = len(line)
             if l > minLineLength and l < maxLineLength:
                 l = int(l // 100)
-                # print("adding "+str(l))
+                # print('adding '+str(l))
                 hRaw[l]+=1

         # Normalize the histogram into percents
@@ -165,8 +165,8 @@ class DocAnalysis:
             h = [float(count)/totalLines for count in hRaw]
         else:
             h = []
-        # print("\nhRaw histogram lengths are: "+str(hRaw))
-        # print(" percents are: "+str(h)+"\n")
+        # print('\nhRaw histogram lengths are: '+str(hRaw))
+        # print(' percents are: '+str(h)+'\n')

         # Find the biggest bucket
         maxValue = 0
@@ -175,10 +175,10 @@ class DocAnalysis:
                 maxValue = h[i]

         if maxValue < percent:
-            # print("Line lengths are too variable. Not unwrapping.")
+            # print('Line lengths are too variable. Not unwrapping.')
             return False
         else:
-            # print(str(maxValue)+" of the lines were in one bucket")
+            # print(str(maxValue)+' of the lines were in one bucket')
             return True


@@ -528,7 +528,7 @@ class HTMLPreProcessor:
             docanalysis = DocAnalysis('pdf', html)
             length = docanalysis.line_length(getattr(self.extra_opts, 'unwrap_factor'))
             if length:
-                # print("The pdf line length returned is " + str(length))
+                # print('The pdf line length returned is ' + str(length))
                 # unwrap em/en dashes
                 end_rules.append((re.compile(
                     r'(?<=.{%i}[–—])\s*<p>\s*(?=[\[a-z\d])' % length), lambda match: ''))
@@ -628,7 +628,7 @@ class HeuristicProcessor:
     def check_paragraph(self, content):
         content = re.sub('\\s*</?span[^>]*>\\s*', '', content)
         if re.match('.*["\'.!?:]$', content):
-            # print "detected this as a paragraph"
+            # print 'detected this as a paragraph'
             return True
         else:
             return False
@@ -641,9 +641,9 @@ class HeuristicProcessor:
         html = re.sub('</?a[^>]*>', '', html)

         def convert_styles(match):
-            # print "raw styles are: "+match.group('styles')
+            # print 'raw styles are: '+match.group('styles')
             content = match.group('content')
-            # print "raw content is: "+match.group('content')
+            # print 'raw content is: '+match.group('content')
             image = match.group('image')

             is_paragraph = False
@@ -668,12 +668,12 @@ class HeuristicProcessor:
             else:
                 styles = match.group('styles').split(';')
                 is_paragraph = self.check_paragraph(content)
-                # print "styles for this line are: "+str(styles)
+                # print 'styles for this line are: '+str(styles)
                 split_styles = []
                 for style in styles:
-                    # print "style is: "+str(style)
+                    # print 'style is: '+str(style)
                     newstyle = style.split(':')
-                    # print "newstyle is: "+str(newstyle)
+                    # print 'newstyle is: '+str(newstyle)
                     split_styles.append(newstyle)
                 styles = split_styles
             for style, setting in styles:
@@ -710,18 +710,18 @@ class HeuristicProcessor:
                         self.log.debug('padding bottom is: ' + str(setting[2]))
                         self.log.debug('padding left is: ' +str(setting[3]))

-            # print "text-align is: "+str(text_align)
-            # print "\n***\nline is:\n "+str(match.group(0))+'\n'
+            # print 'text-align is: '+str(text_align)
+            # print '\n***\nline is:\n '+str(match.group(0))+'\n'
             if debugabby:
-                # print "this line is a paragraph = "+str(is_paragraph)+", previous line was "+str(self.previous_was_paragraph)
+                # print 'this line is a paragraph = '+str(is_paragraph)+', previous line was '+str(self.previous_was_paragraph)
                 self.log.debug('styles for this line were:', styles)
                 self.log.debug('newline is:')
                 self.log.debug(blockquote_open_loop+blockquote_close_loop+
                     paragraph_before+'<p style="'+text_indent+text_align+
                     '">'+content+'</p>'+paragraph_after+'\n\n\n\n\n')
-            # print "is_paragraph is "+str(is_paragraph)+", previous_was_paragraph is "+str(self.previous_was_paragraph)
+            # print 'is_paragraph is '+str(is_paragraph)+', previous_was_paragraph is '+str(self.previous_was_paragraph)
             self.previous_was_paragraph = is_paragraph
-            # print "previous_was_paragraph is now set to "+str(self.previous_was_paragraph)+"\n\n\n"
+            # print 'previous_was_paragraph is now set to '+str(self.previous_was_paragraph)+'\n\n\n'
             return blockquote_open_loop+blockquote_close_loop+paragraph_before+'<p style="'+text_indent+text_align+'">'+content+'</p>'+paragraph_after

         html = abbyy_line.sub(convert_styles, html)
@@ -1,4 +1,4 @@
-""" Hyphenation, using Frank Liang's algorithm.
+''' Hyphenation, using Frank Liang's algorithm.

 This module provides a single function to hyphenate words. hyphenate_word takes
 a string (the word), and returns a list of parts that can be separated by hyphens.
@@ -12,7 +12,7 @@

 Ned Batchelder, July 2007.
 This Python code is in the public domain.
-"""
+'''

 import re

@@ -283,12 +283,12 @@ class mssha1:
         return ''.join(['%02x' % c for c in bytearray(self.digest())])

     def copy(self):
-        """Return a clone object.
+        '''Return a clone object.

         Return a copy ('clone') of the md5 object. This can be used
         to efficiently compute the digests of strings that share
         a common initial substring.
-        """
+        '''

         return copy.deepcopy(self)

@@ -122,7 +122,7 @@ class xml_attr_field:
         self.attr= attr

     def __get__(self, obj, typ=None):
-        """ Return the data in this field or '' if the field is empty """
+        ''' Return the data in this field or '' if the field is empty '''
         document = obj.info
         elems = document.getElementsByTagName(self.tag_name)
         if len(elems):
@@ -170,7 +170,7 @@ class xml_field:
         self.parent = parent

     def __get__(self, obj, typ=None):
-        """ Return the data in this field or '' if the field is empty """
+        ''' Return the data in this field or '' if the field is empty '''
         document = obj.info

         elems = document.getElementsByTagName(self.tag_name)
@@ -397,7 +397,7 @@ class LrfTag:
         if p is None:
             return

-        # print " Writing tag", self.name
+        # print ' Writing tag', self.name
         for f in self.format:
             if isinstance(f, dict):
                 p = f[p]
@@ -565,7 +565,7 @@ class LrfObject:
                 dotscode)))

     def write(self, lrf, encoding=None):
-        # print "Writing object", self.name
+        # print 'Writing object', self.name
         LrfTag('ObjectStart', (self.objId, self.type)).write(lrf)

         for tag in self.tags:
@@ -357,7 +357,7 @@ class LrsObject:


 class Book(Delegator):
-    """
+    '''
     Main class for any lrs or lrf. All objects must be appended to
     the Book class in some way or another in order to be rendered as
     an LRS or LRF file.
@@ -396,7 +396,7 @@ class Book(Delegator):
     Override the default SetDefault.

     There are several other settings -- see the BookInfo class for more.
-    """
+    '''

     def __init__(self, textstyledefault=None, blockstyledefault=None,
                  pagestyledefault=None,
@@ -1344,7 +1344,7 @@ class Page(LrsObject, LrsContainer):
             if hasattr(content, 'getReferencedObjIds'):
                 pageContent.update(content.getReferencedObjIds())

-        # print "page contents:", pageContent
+        # print 'page contents:', pageContent
         # ObjectList not needed and causes slowdown in SONY LRF renderer
         # p.appendLrfTag(LrfTag("ObjectList", pageContent))
         p.appendLrfTag(LrfTag('Link', self.pageStyle.objId))
@@ -1485,7 +1485,7 @@ class TextBlock(LrsObject, LrsContainer):


 class Paragraph(LrsContainer):
-    """
+    '''
     Note: <P> alone does not make a paragraph. Only a CR inserted
     into a text block right after a <P> makes a real paragraph.
     Two Paragraphs appended in a row act like a single Paragraph.
@@ -1493,7 +1493,7 @@ class Paragraph(LrsContainer):
     Also note that there are few autoappenders for Paragraph (and
     the things that can go in it.) It's less confusing (to me) to use
     explicit .append methods to build up the text stream.
-    """
+    '''

     def __init__(self, text=None):
         LrsContainer.__init__(self, [Text, CR, DropCaps, CharButton,
@@ -1620,7 +1620,7 @@ class Button(LrsObject, LrsContainer):

     def toLrf(self, lrfWriter):
         (refobj, refpage) = self.findJumpToRefs()
-        # print "Button writing JumpTo refobj=", jumpto.refobj, ", and refpage=", jumpto.refpage
+        # print 'Button writing JumpTo refobj=', jumpto.refobj, ', and refpage=', jumpto.refpage
         button = LrfObject('Button', self.objId)
         button.appendLrfTag(LrfTag('buttonflags', 0x10)) # pushbutton
         button.appendLrfTag(LrfTag('PushButtonStart'))
@@ -281,7 +281,7 @@ class MetadataUpdater:
         offset += 1
         self.md_header['num_recs'] = ord(self.data[offset:offset+1])
         offset += 1
-        # print "self.md_header: %s" % self.md_header
+        # print 'self.md_header: %s' % self.md_header

         self.metadata = {}
         self.md_seq = []
@@ -642,13 +642,13 @@ class DirContainer:


 class Metadata:
-    """A collection of OEB data model metadata.
+    '''A collection of OEB data model metadata.

     Provides access to the list of items associated with a particular metadata
     term via the term's local name using either Python container or attribute
     syntax. Return an empty list for any terms with no currently associated
     metadata items.
-    """
+    '''

     DC_TERMS = {'contributor', 'coverage', 'creator', 'date',
                 'description', 'format', 'identifier', 'language',
@@ -910,7 +910,7 @@ class Manifest:
     '''

     class Item:
-        """An OEB data model book content file.
+        '''An OEB data model book content file.

         Provides the following data members for accessing the file content and
         metadata associated with this particular file.
@@ -927,7 +927,7 @@ class Manifest:
         primary linear reading order and `False` for textual content items
         which are not (such as footnotes). Meaningless for items which
         have a :attr:`spine_position` of `None`.
-        """
+        '''

         def __init__(self, oeb, id, href, media_type,
                      fallback=None, loader=str, data=None):
@@ -1033,7 +1033,7 @@ class Manifest:

         @property
         def data(self):
-            """Provides MIME type sensitive access to the manifest
+            '''Provides MIME type sensitive access to the manifest
             entry's associated content.

             - XHTML, HTML, and variant content is parsed as necessary to
@@ -1044,7 +1044,7 @@ class Manifest:
               CSS DOM stylesheet.
             - All other content is returned as a :class:`str` or :class:`bytes`
               object with no special parsing.
-            """
+            '''
             data = self._data
             if data is None:
                 data = self.data_as_bytes_or_none
@@ -1177,7 +1177,7 @@ class Manifest:
         self.hrefs = {}

     def add(self, id, href, media_type, fallback=None, loader=None, data=None):
-        """Add a new item to the book manifest.
+        '''Add a new item to the book manifest.

         The item's :param:`id`, :param:`href`, and :param:`media_type` are all
         required. A :param:`fallback` item-id is required for any items with a
@@ -1185,7 +1185,7 @@ class Manifest:
         item's data itself may be provided with :param:`data`, or a loader
         function for the data may be provided with :param:`loader`, or the
         item's data may later be set manually via the :attr:`data` attribute.
-        """
+        '''
         item = self.Item(
             self.oeb, id, href, media_type, fallback, loader, data)
         self.items.add(item)
@@ -1293,13 +1293,13 @@ class Manifest:


 class Spine:
-    """Collection of manifest items composing an OEB data model book's main
+    '''Collection of manifest items composing an OEB data model book's main
     textual content.

     The spine manages which manifest items compose the book's main textual
     content and the sequence in which they appear. Provides Python container
     access as a list-like object.
-    """
+    '''

     def __init__(self, oeb):
         self.oeb = oeb
@@ -1684,14 +1684,14 @@ class TOC:


 class PageList:
-    """Collection of named "pages" to mapped positions within an OEB data model
+    '''Collection of named "pages" to mapped positions within an OEB data model
     book's textual content.

     Provides list-like access to the pages.
-    """
+    '''

     class Page:
-        """Represents a mapping between a page name and a position within
+        '''Represents a mapping between a page name and a position within
         the book content.

         Provides the following instance data attributes:
@@ -1705,7 +1705,7 @@ class PageList:
           fashion in print, such as the cover and title pages).
         :attr:`klass`: Optional semantic class of this page.
         :attr:`id`: Optional unique identifier for this page.
-        """
+        '''
         TYPES = {'front', 'normal', 'special'}

         def __init__(self, name, href, type='normal', klass=None, id=None):
@@ -1847,7 +1847,7 @@ class OEBBook:
         return cls(encoding=encoding, pretty_print=pretty_print)

     def translate(self, text):
-        """Translate :param:`text` into the book's primary language."""
+        '''Translate :param:`text` into the book's primary language.'''
         lang = str(self.metadata.language[0])
         lang = lang.split('-', 1)[0].lower()
         return translate(lang, text)
@@ -1887,11 +1887,11 @@ class OEBBook:
         return fix_data(data)

     def to_opf1(self):
-        """Produce OPF 1.2 representing the book's metadata and structure.
+        '''Produce OPF 1.2 representing the book's metadata and structure.

         Returns a dictionary in which the keys are MIME types and the values
         are tuples of (default) filenames and lxml.etree element structures.
-        """
+        '''
         package = etree.Element('package',
             attrib={'unique-identifier': self.uid.id})
         self.metadata.to_opf1(package)
@@ -1967,11 +1967,11 @@ class OEBBook:
         return ncx

     def to_opf2(self, page_map=False):
-        """Produce OPF 2.0 representing the book's metadata and structure.
+        '''Produce OPF 2.0 representing the book's metadata and structure.

         Returns a dictionary in which the keys are MIME types and the values
         are tuples of (default) filenames and lxml.etree element structures.
-        """
+        '''
         results = {}
         package = etree.Element(OPF('package'),
             attrib={'version': '2.0', 'unique-identifier': self.uid.id},
@@ -315,7 +315,7 @@ class Document:
             if not REGEXES['divToPElementsRe'].search(str(''.join(map(tounicode, list(elem))))):
                 # self.debug("Altering %s to p" % (describe(elem)))
                 elem.tag = 'p'
-                # print("Fixed element "+describe(elem))
+                # print('Fixed element '+describe(elem))

         for elem in self.tags(self.html, 'div'):
             if elem.text and elem.text.strip():
@@ -323,7 +323,7 @@ class Document:
                 p.text = elem.text
                 elem.text = None
                 elem.insert(0, p)
-                # print("Appended "+tounicode(p)+" to "+describe(elem))
+                # print('Appended '+tounicode(p)+' to '+describe(elem))

                 for pos, child in reversed(list(enumerate(elem))):
                     if child.tail and child.tail.strip():
@@ -331,7 +331,7 @@ class Document:
                         p.text = child.tail
                         child.tail = None
                         elem.insert(pos + 1, p)
-                        # print("Inserted "+tounicode(p)+" to "+describe(elem))
+                        # print('Inserted '+tounicode(p)+' to '+describe(elem))
                     if child.tag == 'br':
                         # print('Dropped <br> at '+describe(elem))
                         child.drop_tree()
@ -142,7 +142,7 @@ class ParseRtf:
char_data='',
default_encoding='cp1252',
):
"""
'''
Requires:
'file' --file to parse
'char_data' --file containing character maps
@ -157,7 +157,7 @@ class ParseRtf:
'check_brackets' -- make sure the brackets match up after each run
through a file. Only for debugging.
Returns: Nothing
"""
'''

self.__file = in_file
self.__out_file = out_file
@ -210,14 +210,14 @@ class ParseRtf:
return 1

def parse_rtf(self):
"""
'''
Parse the file by calling on other classes.
Requires:
Nothing
Returns:
A parsed file in XML, either to standard output or to a file,
depending on the value of 'output' when the instance was created.
"""
'''
self.__temp_file = self.__make_temp_file(self.__file)
# if the self.__deb_dir is true, then create a copy object,
# set the directory to write to, remove files, and copy
@ -35,7 +35,7 @@ class AddBrackets:
copy=None,
run_level=1,
):
"""
'''
Required:
'file'--file to parse
Optional:
@ -44,7 +44,7 @@ class AddBrackets:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -34,7 +34,7 @@ class BodyStyles:
bug_handler,
copy=None,
run_level=1,):
"""
'''
Required:
'file'--file to parse
'table_data' -- a dictionary for each table.
@ -44,7 +44,7 @@ class BodyStyles:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -31,7 +31,7 @@ class Colors:
copy=None,
run_level=1
):
"""
'''
Required:
'file'--file to parse
Optional:
@ -40,7 +40,7 @@ class Colors:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__copy = copy
self.__bug_handler = bug_handler
@ -92,12 +92,12 @@ class Colors:
nothing
Logic:
get the hex number from the line and add it to the color string.
'''
hex_num = line[-3:-1]
self.__color_string += hex_num

def __blue_func(self, line):
"""
'''
Requires:
line
Returns:
@ -108,7 +108,7 @@ class Colors:
as the key, and the hex number as the value. Write an empty tag
with the hex number and number as attributes. Add one to the color
number. Reset the color string to '#'
"""
'''
hex_num = line[-3:-1]
self.__color_string += hex_num
self.__color_dict[self.__color_num] = self.__color_string
@ -130,7 +130,7 @@ class Colors:
change the state to after the color table.
Otherwise, get a function by passing the self.__token_info to the
state dictionary.
'''
# mi<mk<clrtbl-beg
# cw<ci<red_______<nu<00
if self.__token_info == 'mi<mk<clrtbl-end':
@ -24,7 +24,7 @@ class ConvertToTags:
copy=None,
run_level=1,
):
"""
'''
Required:
'file'
Optional:
@ -33,7 +33,7 @@ class ConvertToTags:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -105,13 +105,13 @@ class ConvertToTags:
self.__write_extra_new_line()

def __open_att_func(self, line):
"""
'''
Process lines for open tags that have attributes.
The important info is between [17:-1]. Take this info and split it
with the delimiter '<'. The first token in this group is the element
name. The rest are attributes, separated fromt their values by '>'. So
read each token one at a time, and split them by '>'.
"""
'''
# mi<tg<open-att__<footnote<num>
info = line[17:-1]
tokens = info.split('<')
@ -140,9 +140,9 @@ class ConvertToTags:
self.__write_extra_new_line()

def __empty_att_func(self, line):
"""
'''
Same as the __open_att_func, except a '/' is placed at the end of the tag.
"""
'''
# mi<tg<open-att__<footnote<num>
info = line[17:-1]
tokens = info.split('<')
@ -260,7 +260,7 @@ class ConvertToTags:
attributes.
a closed function for closed tags.
an empty tag function.
'''
self.__initiate_values()
with open_for_write(self.__write_to) as self.__write_obj:
self.__write_dec()
@ -95,8 +95,8 @@ class DeleteInfo:
return True

def __delete_func(self,line):
"""Handle lines when in delete state. Don't print out lines
'''Handle lines when in delete state. Don't print out lines
unless the state has ended."""
unless the state has ended.'''
if self.__delete_count == self.__cb_count:
self.__state = 'default'
if self.__write_cb:
@ -182,7 +182,7 @@ class FieldStrings:
self.__link_switch = re.compile(r'\\l\s{1,}(.*?)\s')

def process_string(self, my_string, type):
"""
'''
Requires:
my_string --the string to parse.
type -- the type of string.
@ -195,7 +195,7 @@ class FieldStrings:
resulting list. This item is the field's type. Check for the
action in the field instructions dictionary for further parsing.
If no action is found, print out an error message.
"""
'''
changed_string = ''
lines = my_string.split('\n')
for line in lines:
@ -770,7 +770,7 @@ class FieldStrings:
return [None, None, the_string]

def __symbol_func(self, field_name, name, line):
"""
'''
Requires:
field_name --first name in the string.
name -- the changed name according to the dictionary.
@ -792,7 +792,7 @@ class FieldStrings:
string, and make this string the last item in a list. The first
item in the list is the simple word 'symbol', which tells me that
I don't really have field, but UTF-8 data.
"""
'''
num = ''
font = ''
font_size = ''
@ -20,7 +20,7 @@ from . import open_for_read, open_for_write


class FieldsLarge:
r"""
r'''
=========================
Logic
=========================
@ -92,7 +92,7 @@ Examples
language="1024">1</inline></field></para>
</paragraph-definition>
</field-block>
"""
'''

def __init__(self,
in_file,
@ -100,7 +100,7 @@ Examples
copy=None,
run_level=1,
):
"""
'''
Required:
'file'--file to parse
Optional:
@ -109,7 +109,7 @@ Examples
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -244,7 +244,7 @@ Examples
self.__sec_in_field[-1] = 1

def __found_field_instruction_func(self, line):
"""
'''
Requires:
line -- line to parse
Returns:
@ -252,13 +252,13 @@ Examples
Change the state to field instruction. Set the open bracket count of
the beginning of this field so you know when it ends. Set the closed
bracket count to 0 so you don't prematureley exit this state.
"""
'''
self.__state = 'field_instruction'
self.__field_instruction_count = self.__ob_count
self.__cb_count = 0

def __field_instruction_func(self, line):
"""
'''
Requires:
line --line to parse
Returns:
@ -267,7 +267,7 @@ Examples
Collect all the lines until the end of the field is reached.
Process these lines with the module rtr.field_strings.
Check if the field instruction is 'Symbol' (really UTF-8).
"""
'''
if self.__cb_count == self.__field_instruction_count:
# The closing bracket should be written, since the opening bracket
# was written
@ -21,7 +21,7 @@ from . import open_for_read, open_for_write


class FieldsSmall:
"""
'''
=================
Purpose
=================
@ -36,7 +36,7 @@ is found, store the opening bracket count in a variable. Collect all the text
until the closing bracket entry is found. Send the string to the module
field_strings to process it. Write the processed string to the output
file.
"""
'''

def __init__(self,
in_file,
@ -44,7 +44,7 @@ file.
copy=None,
run_level=1,
):
"""
'''
Required:
'file'--file to parse
Optional:
@ -53,7 +53,7 @@ file.
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -119,7 +119,7 @@ file.
self.__write_obj.write(line)

def __found_bookmark_func(self, line, tag):
"""
'''
Requires:
line --the line to parse
Returns:
@ -128,7 +128,7 @@ file.
This function is called when a bookmark is found. The opening
bracket count is stored int eh beginning bracket count. The state
is changed to 'bookmark.'
"""
'''
self.__beg_bracket_count = self.__ob_count
self.__cb_count = 0
self.__state = 'bookmark'
@ -164,7 +164,7 @@ file.
self.__text_string += line[17:-1]

def __parse_index_func(self, my_string):
"""
'''
Requires:
my_string --string to parse
type --type of string
@ -179,7 +179,7 @@ file.
sub entry element.
If the token is a pargrah ending, ignore it, since I don't won't
paragraphs within toc or index entries.
"""
'''
my_string, see_string = self.__index_see_func(my_string)
my_string, bookmark_string = self.__index_bookmark_func(my_string)
italics, bold = self.__index__format_func(my_string)
@ -385,7 +385,7 @@ file.
return my_changed_string

def __found_toc_index_func(self, line, tag):
"""
'''
Requires:
line --the line to parse
Returns:
@ -394,7 +394,7 @@ file.
This function is called when a toc or index entry is found. The opening
bracket count is stored in the beginning bracket count. The state
is changed to 'toc_index.'
"""
'''
self.__beg_bracket_count = self.__ob_count
self.__cb_count = 0
self.__state = 'toc_index'
@ -31,7 +31,7 @@ class Fonts:
copy=None,
run_level=1,
):
"""
'''
Required:
'file'--file to parse
'default_font_num'--the default font number
@ -41,7 +41,7 @@ class Fonts:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -86,7 +86,7 @@ class Fonts:
self.__write_obj.write(line)

def __font_table_func(self, line):
"""
'''
Requires:
line
Returns:
@ -99,7 +99,7 @@ class Fonts:
font to the default font (in case there is no number provided, in
which case RTF assumes the number will be the default font.) Reset
the test string (for the font name) to ''
"""
'''
if self.__token_info == 'mi<mk<fonttb-end':
self.__state = 'after_font_table'
elif self.__token_info == 'mi<mk<fontit-beg':
@ -125,7 +125,7 @@ class Fonts:
dictionary. Also create an empty tag with the name and number
as attributes.
Preamture end of font table
'''
# cw<ci<font-style<nu<4
# tx<nu<__________<Times;
if self.__token_info == 'mi<mk<fontit-end':
@ -174,7 +174,7 @@ class Fonts:
the name rather than the number.
If the line does not contain font info, simply print it out to the
file.
'''
if self.__token_info == 'cw<ci<font-style':
font_num = line[20:-1]
font_name = self.__font_table.get(font_num)
@ -205,7 +205,7 @@ class Fonts:
tag for each individual font in the font table.
If the state is after the font table, look for lines with font
info. Substitute a font name for a font number.
'''
self.__initiate_values()
with open_for_read(self.__file) as read_obj:
with open_for_write(self.__write_to) as self.__write_obj:
@ -183,13 +183,13 @@ class Footnote:
self.__write_to_foot_obj.write(line)

def __get_footnotes(self):
"""
'''
Private method to remove footnotes from main file. Read one line from
the main file at a time. If the state is 'body', call on the private
__get_foot_foot_func. Otherwise, call on the __get_foot_body_func.
These two functions do the work of separating the footnotes form the
body.
"""
'''
with open_for_read(self.__file) as read_obj:
with open_for_write(self.__write_to) as self.__write_obj:
with open_for_write(self.__footnote_holder) as self.__write_to_foot_obj:
@ -20,17 +20,12 @@ class GetCharMap:
'''

def __init__(self, bug_handler, char_file):
"""
'''

Required:

'char_file'--the file with the mappings

Returns:

nothing

'''
"""
self.__char_file = char_file
self.__bug_handler = bug_handler
@ -21,12 +21,12 @@ from . import open_for_read, open_for_write


class GroupBorders:
"""
'''
Form lists.
Use RTF's own formatting to determine if a paragraph definition is part of a
list.
Use indents to determine items and how lists are nested.
"""
'''

def __init__(self,
in_file,
@ -35,7 +35,7 @@ class GroupBorders:
run_level=1,
wrap=0,
):
"""
'''
Required:
'file'
Optional:
@ -44,7 +44,7 @@ class GroupBorders:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -123,7 +123,7 @@ class GroupBorders:
self.__last_border_string = ''

def __in_pard_func(self, line):
"""
'''
Required:
line -- the line of current text.
Return:
@ -131,7 +131,7 @@ class GroupBorders:
Logic:
You are in a list, but in the middle of a paragraph definition.
Don't do anything until you find the end of the paragraph definition.
"""
'''
if self.__token_info == 'mi<tg<close_____' \
and line[17:-1] == 'paragraph-definition':
self.__state = 'after_pard'
@ -220,7 +220,7 @@ class GroupBorders:
Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to
in_pard.
'''
if self.__token_info == 'mi<tg<open-att__' \
and line[17:37] == 'paragraph-definition':
contains_border = self.__is_border_func(line)
@ -21,12 +21,12 @@ from . import open_for_read, open_for_write


class GroupStyles:
"""
'''
Form lists.
Use RTF's own formatting to determine if a paragraph definition is part of a
list.
Use indents to determine items and how lists are nested.
"""
'''

def __init__(self,
in_file,
@ -35,7 +35,7 @@ class GroupStyles:
run_level=1,
wrap=0,
):
"""
'''
Required:
'file'
Optional:
@ -44,7 +44,7 @@ class GroupStyles:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -112,7 +112,7 @@ class GroupStyles:
self.__line_num = 0

def __in_pard_func(self, line):
"""
'''
Required:
line -- the line of current text.
Return:
@ -120,7 +120,7 @@ class GroupStyles:
Logic:
You are in a list, but in the middle of a paragraph definition.
Don't do anything until you find the end of the paragraph definition.
"""
'''
if self.__token_info == 'mi<tg<close_____' \
and line[17:-1] == 'paragraph-definition':
self.__state = 'after_pard'
@ -213,7 +213,7 @@ class GroupStyles:
Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to
in_pard.
'''
if self.__token_info == 'mi<tg<open-att__' \
and line[17:37] == 'paragraph-definition':
self.__state = 'in_pard'
@ -183,13 +183,13 @@ class Header:
self.__write_to_head_obj.write(line)

def __get_headers(self):
"""
'''
Private method to remove footnotes from main file. Read one line from
the main file at a time. If the state is 'body', call on the private
__get_foot_foot_func. Otherwise, call on the __get_foot_body_func.
These two functions do the work of separating the footnotes form the
body.
"""
'''
with open_for_read(self.__file) as read_obj:
with open_for_write(self.__write_to) as self.__write_obj:
with open_for_write(self.__header_holder) as self.__write_to_head_obj:
@ -29,7 +29,7 @@ class HeadingsToSections:
copy=None,
run_level=1,
):
"""
'''
Required:
'file'
Optional:
@ -38,7 +38,7 @@ class HeadingsToSections:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -154,7 +154,7 @@ class HeadingsToSections:
Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to
in_pard.
'''
if self.__token_info == 'mi<mk<sect-start':
self.__section_num[0] += 1
self.__section_num = self.__section_num[0:1]
@ -42,7 +42,7 @@ class Hex2Utf8:
dingbats=None,
run_level=1,
):
"""
'''
Required:
'file'
'area_to_convert'--the area of file to convert
@ -58,7 +58,7 @@ class Hex2Utf8:
'convert_to_caps'--wether to convert caps to utf-8
Returns:
nothing
"""
'''
self.__file = in_file
self.__copy = copy
if area_to_convert not in ('preamble', 'body'):
@ -99,7 +99,7 @@ class Hex2Utf8:
caps=None,
dingbats=None,
):
"""
'''
Required:
'file'
'area_to_convert'--the area of file to convert
@ -114,7 +114,7 @@ class Hex2Utf8:
'convert_to_caps'--wether to convert caps to utf-8
Returns:
nothing
"""
'''
self.__file=file
self.__copy = copy
if area_to_convert not in ('preamble', 'body'):
@ -217,7 +217,7 @@ class Hex2Utf8:
self.__font_list = ['not-defined']

def __hex_text_func(self, line):
"""
'''
Required:
'line' -- the line
Logic:
@ -227,7 +227,7 @@ class Hex2Utf8:
as normal text.
If the hex_num is not in the dictionary, then a mistake has been
made.
"""
'''
hex_num = line[17:-1]
converted = self.__current_dict.get(hex_num)
if converted is not None:
@ -31,7 +31,7 @@ class Info:
copy=None,
run_level=1,
):
"""
'''
Required:
'file'--file to parse
Optional:
@ -40,7 +40,7 @@ class Info:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -30,7 +30,7 @@ class Inline:
bug_handler,
copy=None,
run_level=1,):
"""
'''
Required:
'file'--file to parse
Optional:
@ -39,7 +39,7 @@ class Inline:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -153,14 +153,14 @@ class Inline:
self.__write_obj.write(line)

def __found_open_bracket_func(self, line):
"""
'''
Requires:
line -- current line of text
Returns:
nothing
Logic:
Change the state to 'after_open_bracket'
"""
'''
self.__state = 'after_open_bracket'
self.__brac_count += 1
self.__groups_in_waiting[0] += 1
@ -220,7 +220,7 @@ class Inline:
'''

def __close_bracket_func(self, line):
"""
'''
Requires:
line --line of text
Returns:
@ -230,7 +230,7 @@ class Inline:
Get the keys of the last dictionary in the inline_groups.
If 'contains_inline' in the keys, write a close tag.
If the_dict contains font information, write a mk tag.
"""
'''
if len(self.__inline_list) == 0:
# nothing to add
return
@ -282,7 +282,7 @@ class Inline:
self.__write_inline()

def __write_inline(self):
"""
'''
Required:
nothing
Returns
@ -298,7 +298,7 @@ class Inline:
write a marker tag. (I will use this marker tag later when converting
hext text to utf8.)
Write a tag for the inline values.
"""
'''
if self.__groups_in_waiting[0] != 0:
last_index = -1 * self.__groups_in_waiting[0]
inline_list = self.__inline_list[last_index:]
@ -383,10 +383,10 @@ class Inline:
self.__groups_in_waiting[0] = 0

def __found_field_func(self, line):
"""
'''
Just a default function to make sure I don't prematurely exit
default state
"""
'''
pass

def form_tags(self):
@ -30,7 +30,7 @@ class ListNumbers:
copy=None,
run_level=1,
):
"""
'''
Required:
'file'
Optional:
@ -39,7 +39,7 @@ class ListNumbers:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -71,7 +71,7 @@ class ListNumbers:
self, line
Returns:
Nothing
'''
if self.__token_info == 'cw<ls<list-text_':
self.__state = 'list_text'
self.__list_chunk = self.__list_chunk + \
@ -136,7 +136,7 @@ class ListNumbers:
self, line
Returns:
Nothing
'''
if self.__list_text_ob == self.__cb_count:
self.__state = 'after_list_text'
self.__right_after_list_text = 1
@ -155,7 +155,7 @@ class ListNumbers:
self, line
Returns:
Nothing
'''
if self.__token_info == 'ob<nu<open-brack':
self.__state = 'after_ob'
self.__previous_line = line
@ -163,7 +163,7 @@ class ListNumbers:
self.__write_obj.write(line)

def fix_list_numbers(self):
"""
'''
Required:
nothing
Returns:
@ -178,7 +178,7 @@ class ListNumbers:
found.
Next, look for an open bracket or text. When either is found,
print out self.__list_chunk and the line.
"""
'''
self.__initiate_values()
read_obj = open_for_read(self.__file)
self.__write_obj = open_for_write(self.__write_to)
@ -239,7 +239,7 @@ class ListTable:
'''

def __level_text_func(self, line):
"""
'''
Requires:
line --line to process
Returns:
@ -252,7 +252,7 @@ class ListTable:
This attribute indicates the puncuation after a certain level.
An example is "level1-marker = '.'"
Otherwise, check for a level-template-id.
"""
'''
if self.__token_info == 'cb<nu<clos-brack' and\
self.__cb_count == self.__level_text_ob_count:
if self.__prefix_string:
@ -21,12 +21,12 @@ from . import open_for_read, open_for_write


class MakeLists:
"""
'''
Form lists.
Use RTF's own formatting to determine if a paragraph definition is part of a
list.
Use indents to determine items and how lists are nested.
"""
'''

def __init__(self,
in_file,
@ -38,7 +38,7 @@ class MakeLists:
no_headings_as_list=1,
write_list_info=0,
):
"""
'''
Required:
'file'
Optional:
@ -47,7 +47,7 @@ class MakeLists:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__run_level = run_level
@ -114,7 +114,7 @@ class MakeLists:
self.__line_num = 0

def __in_pard_func(self, line):
"""
'''
Required:
line -- the line of current text.
Return:
@ -122,7 +122,7 @@ class MakeLists:
Logic:
You are in a list, but in the middle of a paragraph definition.
Don't do anything until you find the end of the paragraph definition.
"""
'''
if self.__token_info == 'mi<mk<pard-end__':
self.__state = 'after_pard'
self.__write_obj.write(line)
@ -395,7 +395,7 @@ class MakeLists:
Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to
in_pard.
'''
if self.__token_info == 'mi<tg<open-att__' and line[17:37] == 'paragraph-definition':
is_a_heading = self.__is_a_heading()
if not is_a_heading:
@ -27,7 +27,7 @@ class OldRtf:
bug_handler,
run_level,
):
"""
'''
Required:
'file'--file to parse
'table_data' -- a dictionary for each table.
@ -37,7 +37,7 @@ class OldRtf:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__run_level = run_level
@ -2,7 +2,7 @@ import sys


class ParseOptions:
"""
'''
Requires:
system_string --The string from the command line
options_dict -- a dictionary with the key equal to the opition, and
@ -29,7 +29,7 @@ class ParseOptions:
print arguments
The result will be:
{indents:None, output:'/home/paul/file'}, ['/home/paul/input']
"""
'''

def __init__(self, system_string, options_dict):
self.__system_string = system_string[1:]
@ -133,7 +133,7 @@ class ParseOptions:
return new_string

def __pair_arg_with_option(self):
"""
'''
Required:
nothing
Returns
@ -142,7 +142,7 @@ class ParseOptions:
iterate through the system string, and match arguments with options:
old_list = ['--foo', 'bar']
new_list = ['--foo=bar'
"""
'''
opt_len = len(self.__system_string)
new_system_string = []
counter = 0
@ -30,7 +30,7 @@ class Output:
out_file=None,
no_ask=True
):
"""
'''
Required:
'file' -- xml file ready to output
orig_file -- original rtf file
@ -38,7 +38,7 @@ class Output:
output_file -- the file to output to
Returns:
nothing
"""
'''
self.__file = file
self.__orig_file = orig_file
self.__output_dir = output_dir
@ -20,7 +20,7 @@ from . import open_for_read, open_for_write


class ParagraphDef:
"""
'''
=================
Purpose
=================
@ -52,7 +52,7 @@ be closed:
5. after_para_def
'mi<mk<para-start' changes state to in_paragraphs
if another paragraph_def is found, the state changes to collect_tokens.
"""
'''

def __init__(self,
in_file,
@ -60,7 +60,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
default_font,
copy=None,
run_level=1,):
"""
'''
Required:
'file'--file to parse
'default_font' --document default font
@ -70,7 +70,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__default_font = default_font
@ -423,7 +423,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
self.__reset_dict()

def __end_para_def_func(self, line):
"""
'''
Requires:
Nothing
Returns:
@ -433,13 +433,13 @@ if another paragraph_def is found, the state changes to collect_tokens.
of a paragraph. I want to output the definition tag; output the line
itself (telling me of the beginning of a paragraph);change the
state to 'in_paragraphs';
"""
'''
self.__write_para_def_beg()
self.__write_obj.write(line)
self.__state = 'in_paragraphs'

def __start_para_after_def_func(self, line):
"""
'''
Requires:
Nothing
Returns:
@ -450,7 +450,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
itself (telling me of the beginning of a paragraph);change the
state to 'in_paragraphs'.
(I now realize that this is absolutely identical to the function above!)
"""
'''
self.__write_para_def_beg()
self.__write_obj.write(line)
self.__state = 'in_paragraphs'
@ -503,7 +503,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
self.__write_obj.write(line)

def __after_para_end_func(self, line):
"""
'''
Requires:
line -- line to output
Returns:
@ -520,14 +520,14 @@ if another paragraph_def is found, the state changes to collect_tokens.
If you find the beginning of a paragraph, then you don't need to
write out the paragraph definition. Write out the string, and
change the state to in paragraphs.
"""
'''
self.__text_string += line
action = self.__after_para_end_dict.get(self.__token_info)
if action:
action(line)

def __continue_block_func(self, line):
"""
'''
Requires:
line --line to print out
Returns:
@ -537,14 +537,14 @@ if another paragraph_def is found, the state changes to collect_tokens.
start of a paragraph, so you don't need to print out the paragraph
definition. Print out the string, the line, and change the state
to in paragraphs.
"""
'''
self.__state = 'in_paragraphs'
self.__write_obj.write(self.__text_string)
self.__text_string = ''
# found a new paragraph definition after an end of a paragraph

def __new_para_def_func(self, line):
"""
'''
Requires:
line -- line to output
Returns:
@ -554,13 +554,13 @@ if another paragraph_def is found, the state changes to collect_tokens.
paragraph. Output the end of the old paragraph definition. Output
the text string. Output the line. Change the state to collect
tokens. (And don't forget to set the text string to ''!)
"""
'''
self.__write_para_def_end_func()
self.__found_para_def_func()
# after a paragraph and found reason to stop this block

def __stop_block_func(self, line):
"""
'''
Requires:
line --(shouldn't be here?)
Returns:
@ -570,7 +570,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
than paragraph-definition. You want to write the end tag of the
old definition and reset the text string (handled by other
methods).
"""
'''
self.__write_para_def_end_func()
self.__state = 'after_para_def'
@ -20,7 +20,7 @@ from . import open_for_read, open_for_write


class Paragraphs:
"""
'''
=================
Purpose
=================
@ -41,7 +41,7 @@ class Paragraphs:
(\\par) marks the end of a paragraph. So does the end of a footnote or heading;
a paragraph definition; the end of a field-block; and the beginning of a
section. (How about the end of a section or the end of a field-block?)
"""
'''

def __init__(self,
in_file,
@ -50,7 +50,7 @@ class Paragraphs:
write_empty_para=1,
run_level=1,
):
"""
'''
Required:
'file'--file to parse
Optional:
@ -59,7 +59,7 @@ class Paragraphs:
directory from which the script is run.)
Returns:
nothing
"""
'''
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
@ -108,7 +108,7 @@ class Paragraphs:
}

def __before_body_func(self, line):
"""
'''
Required:
line -- line to parse
Returns:
@ -116,7 +116,7 @@ class Paragraphs:
Logic:
This function handles all the lines before the start of the body.
Once the body starts, the state is switched to 'not_paragraph'
"""
'''
if self.__token_info == 'mi<mk<body-open_':
self.__state = 'not_paragraph'
self.__write_obj.write(line)
@ -232,7 +232,7 @@ class Paragraphs:
self.__write_obj.write('mi<mk<bogus-pard\n')

def make_paragraphs(self):
"""
'''
Requires:
nothing
Returns:
@ -243,7 +243,7 @@ class Paragraphs:
beginning of the body.
When the body is found, change the state to 'not_paragraph'. The
only other state is 'paragraph'.
"""
'''
self.__initiate_values()
with open_for_read(self.__file) as read_obj:
with open_for_write(self.__write_to) as self.__write_obj:
@@ -30,7 +30,7 @@ class PreambleDiv:
 no_namespace=None,
 run_level=1,
 ):
-"""
+'''
 Required:
 'file'
 Optional:
@@ -39,7 +39,7 @@ class PreambleDiv:
 directory from which the script is run.)
 Returns:
 nothing
-"""
+'''
 self.__file = in_file
 self.__bug_handler = bug_handler
 self.__copy = copy

@@ -34,7 +34,7 @@ class Preamble:
 copy=None,
 temp_dir=None,
 ):
-"""
+'''
 Required:
 file--file to parse
 platform --Windows or Macintosh
@@ -46,7 +46,7 @@ class Preamble:
 directory from which the script is run.)
 Returns:
 nothing
-"""
+'''
 self.__file=file
 self.__bug_handler = bug_handler
 self.__copy = copy
@@ -20,7 +20,7 @@ from . import open_for_read, open_for_write


 class Sections:
-"""
+'''
 =================
 Purpose
 =================
@@ -53,14 +53,14 @@ class Sections:
 the list; use the second item in the description list.
 CHANGE (2004-04-26) No longer write sections that occur in field-blocks.
 Instead, ignore all section information in a field-block.
-"""
+'''

 def __init__(self,
 in_file,
 bug_handler,
 copy=None,
 run_level=1):
-"""
+'''
 Required:
 'file'--file to parse
 Optional:
@@ -69,7 +69,7 @@ class Sections:
 directory from which the script is run.)
 Returns:
 nothing
-"""
+'''
 self.__file = in_file
 self.__bug_handler = bug_handler
 self.__copy = copy
@@ -439,7 +439,7 @@ class Sections:
 self.__write_obj.write(line)

 def __print_field_sec_attributes(self):
-"""
+'''
 Requires:
 nothing
 Returns:
@@ -457,7 +457,7 @@ class Sections:
 my_string += '\n'
 else:
 my_string += 'mi<tg<open______<section-definition\n'
-"""
+'''
 num = self.__field_num[0]
 self.__field_num = self.__field_num[1:]
 self.__write_obj.write(
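The rtf2xml passes touched here (Paragraphs, Sections, and the Styles and Table classes in the following hunks) share one shape: read the tokenized file line by line, look the current token up in a state dictionary, and call the matching handler, which may flip the internal state (for example from 'before_body' to 'not_paragraph'). A toy sketch of that dispatch pattern, with hypothetical token and state names rather than the exact calibre tables:

class LineStateMachine:
    '''Minimal model of the dispatch loop used by these rtf2xml passes.'''

    def __init__(self, write_obj):
        self.state = 'before_body'
        self.write_obj = write_obj
        # Handlers keyed by (state, token); the real classes use per-state dicts.
        self.handlers = {
            ('before_body', 'mi<mk<body-open_'): self.body_open,
        }

    def body_open(self, line):
        self.state = 'not_paragraph'

    def process(self, token, line):
        action = self.handlers.get((self.state, token))
        if action:
            action(line)
        # Unhandled lines are passed straight through to the output file.
        self.write_obj.write(line)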
@ -30,7 +30,7 @@ class Styles:
|
|||||||
copy=None,
|
copy=None,
|
||||||
run_level=1,
|
run_level=1,
|
||||||
):
|
):
|
||||||
"""
|
'''
|
||||||
Required:
|
Required:
|
||||||
'file'--file to parse
|
'file'--file to parse
|
||||||
Optional:
|
Optional:
|
||||||
@ -39,7 +39,7 @@ class Styles:
|
|||||||
directory from which the script is run.)
|
directory from which the script is run.)
|
||||||
Returns:
|
Returns:
|
||||||
nothing
|
nothing
|
||||||
"""
|
'''
|
||||||
self.__file = in_file
|
self.__file = in_file
|
||||||
self.__bug_handler = bug_handler
|
self.__bug_handler = bug_handler
|
||||||
self.__copy = copy
|
self.__copy = copy
|
||||||
@ -283,7 +283,7 @@ class Styles:
|
|||||||
Write an error message if no key is found for the info.
|
Write an error message if no key is found for the info.
|
||||||
If the line is text, add the text to a text string. The text
|
If the line is text, add the text to a text string. The text
|
||||||
string will be the name of the style.
|
string will be the name of the style.
|
||||||
'''
|
'''
|
||||||
action = self.__state_dict.get(self.__token_info)
|
action = self.__state_dict.get(self.__token_info)
|
||||||
if action:
|
if action:
|
||||||
action(line)
|
action(line)
|
||||||
@ -313,7 +313,7 @@ class Styles:
|
|||||||
self.__text_string += line[17:-1]
|
self.__text_string += line[17:-1]
|
||||||
|
|
||||||
def __tab_stop_func(self, line):
|
def __tab_stop_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line -- line to parse
|
line -- line to parse
|
||||||
Returns:
|
Returns:
|
||||||
@ -321,7 +321,7 @@ class Styles:
|
|||||||
Logic:
|
Logic:
|
||||||
Try to add the number to dictionary entry tabs-left, or tabs-right, etc.
|
Try to add the number to dictionary entry tabs-left, or tabs-right, etc.
|
||||||
If the dictionary entry doesn't exist, create one.
|
If the dictionary entry doesn't exist, create one.
|
||||||
"""
|
'''
|
||||||
try:
|
try:
|
||||||
if self.__leader_found:
|
if self.__leader_found:
|
||||||
self.__styles_dict['par'][self.__styles_num]['tabs']\
|
self.__styles_dict['par'][self.__styles_num]['tabs']\
|
||||||
@ -353,7 +353,7 @@ class Styles:
|
|||||||
raise self.__bug_handler(msg)
|
raise self.__bug_handler(msg)
|
||||||
|
|
||||||
def __tab_leader_func(self, line):
|
def __tab_leader_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line --line to parse
|
line --line to parse
|
||||||
Returns:
|
Returns:
|
||||||
@ -362,7 +362,7 @@ class Styles:
|
|||||||
Try to add the string of the tab leader to dictionary entry
|
Try to add the string of the tab leader to dictionary entry
|
||||||
tabs-left, or tabs-right, etc. If the dictionary entry doesn't
|
tabs-left, or tabs-right, etc. If the dictionary entry doesn't
|
||||||
exist, create one.
|
exist, create one.
|
||||||
"""
|
'''
|
||||||
self.__leader_found = 1
|
self.__leader_found = 1
|
||||||
leader = self.__tab_type_dict.get(self.__token_info)
|
leader = self.__tab_type_dict.get(self.__token_info)
|
||||||
if leader is not None:
|
if leader is not None:
|
||||||
@ -378,7 +378,7 @@ class Styles:
|
|||||||
raise self.__bug_handler(msg)
|
raise self.__bug_handler(msg)
|
||||||
|
|
||||||
def __tab_bar_func(self, line):
|
def __tab_bar_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line -- line to parse
|
line -- line to parse
|
||||||
Returns:
|
Returns:
|
||||||
@ -386,7 +386,7 @@ class Styles:
|
|||||||
Logic:
|
Logic:
|
||||||
Try to add the string of the tab bar to dictionary entry tabs-bar.
|
Try to add the string of the tab bar to dictionary entry tabs-bar.
|
||||||
If the dictionary entry doesn't exist, create one.
|
If the dictionary entry doesn't exist, create one.
|
||||||
"""
|
'''
|
||||||
# self.__add_dict_entry('tabs-bar', line[20:-1])
|
# self.__add_dict_entry('tabs-bar', line[20:-1])
|
||||||
try:
|
try:
|
||||||
self.__styles_dict['par'][self.__styles_num]['tabs']\
|
self.__styles_dict['par'][self.__styles_num]['tabs']\
|
||||||
@ -482,7 +482,7 @@ class Styles:
|
|||||||
self.__styles_num = line[20:-1]
|
self.__styles_num = line[20:-1]
|
||||||
|
|
||||||
def __found_beg_ind_style_func(self, line):
|
def __found_beg_ind_style_func(self, line):
|
||||||
"""
|
'''
|
||||||
Required:
|
Required:
|
||||||
line
|
line
|
||||||
Returns:
|
Returns:
|
||||||
@ -491,7 +491,7 @@ class Styles:
|
|||||||
Get rid of the last semicolon in the text string. Add the text
|
Get rid of the last semicolon in the text string. Add the text
|
||||||
string as the value with 'name' as the key in the style
|
string as the value with 'name' as the key in the style
|
||||||
dictionary.
|
dictionary.
|
||||||
"""
|
'''
|
||||||
self.__state = 'in_individual_style'
|
self.__state = 'in_individual_style'
|
||||||
|
|
||||||
def __found_end_ind_style_func(self, line):
|
def __found_end_ind_style_func(self, line):
|
||||||
@ -518,7 +518,7 @@ class Styles:
|
|||||||
self.__print_style_table()
|
self.__print_style_table()
|
||||||
|
|
||||||
def __fix_based_on(self):
|
def __fix_based_on(self):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
nothing
|
nothing
|
||||||
Returns:
|
Returns:
|
||||||
@ -532,7 +532,7 @@ class Styles:
|
|||||||
all the character styles.
|
all the character styles.
|
||||||
The inner loop: first check 'next-style', then check 'based-on-style'.
|
The inner loop: first check 'next-style', then check 'based-on-style'.
|
||||||
Make sure values exist for the keys to avoid the nasty keyerror message.
|
Make sure values exist for the keys to avoid the nasty keyerror message.
|
||||||
"""
|
'''
|
||||||
types = ['par', 'char']
|
types = ['par', 'char']
|
||||||
for type in types:
|
for type in types:
|
||||||
keys = self.__styles_dict[type].keys()
|
keys = self.__styles_dict[type].keys()
|
||||||
|
@ -47,19 +47,19 @@ States.
|
|||||||
|
|
||||||
|
|
||||||
class Table:
|
class Table:
|
||||||
"""
|
'''
|
||||||
Make tables.
|
Make tables.
|
||||||
Logic:
|
Logic:
|
||||||
Read one line at a time. The default state (self.__state) is
|
Read one line at a time. The default state (self.__state) is
|
||||||
'not_in_table'. Look for either a 'cw<tb<in-table__', or a row definition.
|
'not_in_table'. Look for either a 'cw<tb<in-table__', or a row definition.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self,
|
||||||
in_file,
|
in_file,
|
||||||
bug_handler,
|
bug_handler,
|
||||||
copy=None,
|
copy=None,
|
||||||
run_level=1,):
|
run_level=1,):
|
||||||
"""
|
'''
|
||||||
Required:
|
Required:
|
||||||
'file'--file to parse
|
'file'--file to parse
|
||||||
Optional:
|
Optional:
|
||||||
@ -68,7 +68,7 @@ class Table:
|
|||||||
directory from which the script is run.)
|
directory from which the script is run.)
|
||||||
Returns:
|
Returns:
|
||||||
nothing
|
nothing
|
||||||
"""
|
'''
|
||||||
self.__file = in_file
|
self.__file = in_file
|
||||||
self.__bug_handler = bug_handler
|
self.__bug_handler = bug_handler
|
||||||
self.__copy = copy
|
self.__copy = copy
|
||||||
@ -113,14 +113,14 @@ class Table:
|
|||||||
self.__cell_widths = []
|
self.__cell_widths = []
|
||||||
|
|
||||||
def __in_table_func(self, line):
|
def __in_table_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line -- line to parse
|
line -- line to parse
|
||||||
Logic:
|
Logic:
|
||||||
Look for the end of the table. If found, close out the table.
|
Look for the end of the table. If found, close out the table.
|
||||||
Look for 'mi<mk<pard-start', which marks the beginning of a row. Start
|
Look for 'mi<mk<pard-start', which marks the beginning of a row. Start
|
||||||
a row and start a cell.
|
a row and start a cell.
|
||||||
"""
|
'''
|
||||||
# 'cell' : ('tb', 'cell______', self.default_func),
|
# 'cell' : ('tb', 'cell______', self.default_func),
|
||||||
if self.__token_info == 'mi<mk<not-in-tbl' or\
|
if self.__token_info == 'mi<mk<not-in-tbl' or\
|
||||||
self.__token_info == 'mi<mk<sect-start' or\
|
self.__token_info == 'mi<mk<sect-start' or\
|
||||||
@ -138,7 +138,7 @@ class Table:
|
|||||||
self.__write_obj.write(line)
|
self.__write_obj.write(line)
|
||||||
|
|
||||||
def __not_in_table_func(self, line):
|
def __not_in_table_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line -- the line of text read in from document
|
line -- the line of text read in from document
|
||||||
Returns:
|
Returns:
|
||||||
@ -148,14 +148,14 @@ class Table:
|
|||||||
mark the start of a table: 'cw<tb<row-def', or 'cw<tb<in-table__'.
|
mark the start of a table: 'cw<tb<row-def', or 'cw<tb<in-table__'.
|
||||||
If these tokens are found, use another method to start a table
|
If these tokens are found, use another method to start a table
|
||||||
and change states. Otherwise, just output the line.
|
and change states. Otherwise, just output the line.
|
||||||
"""
|
'''
|
||||||
action = self.__not_in_table_dict.get(self.__token_info)
|
action = self.__not_in_table_dict.get(self.__token_info)
|
||||||
if action:
|
if action:
|
||||||
action(line)
|
action(line)
|
||||||
self.__write_obj.write(line)
|
self.__write_obj.write(line)
|
||||||
|
|
||||||
def __close_table(self, line):
|
def __close_table(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line -- line to parse
|
line -- line to parse
|
||||||
Returns:
|
Returns:
|
||||||
@ -164,7 +164,7 @@ class Table:
|
|||||||
Write the end marker for the table.
|
Write the end marker for the table.
|
||||||
Write the end tag for the table.
|
Write the end tag for the table.
|
||||||
Set the state to ['not_in_table']
|
Set the state to ['not_in_table']
|
||||||
"""
|
'''
|
||||||
self.__write_obj.write('mi<mk<table-end_\n')
|
self.__write_obj.write('mi<mk<table-end_\n')
|
||||||
self.__state = ['not_in_table']
|
self.__state = ['not_in_table']
|
||||||
self.__table_data[-1]['number-of-columns'] = self.__max_number_cells_in_row
|
self.__table_data[-1]['number-of-columns'] = self.__max_number_cells_in_row
|
||||||
@ -175,7 +175,7 @@ class Table:
|
|||||||
self.__table_data[-1]['average-cell-width'] = average_cell_width
|
self.__table_data[-1]['average-cell-width'] = average_cell_width
|
||||||
|
|
||||||
def __found_row_def_func(self, line):
|
def __found_row_def_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line don't need this except for consistency with other methods.
|
line don't need this except for consistency with other methods.
|
||||||
Returns:
|
Returns:
|
||||||
@ -183,7 +183,7 @@ class Table:
|
|||||||
Logic:
|
Logic:
|
||||||
A row definition has been found. Collect all the data from this
|
A row definition has been found. Collect all the data from this
|
||||||
to use later in writing attributes for the table.
|
to use later in writing attributes for the table.
|
||||||
"""
|
'''
|
||||||
self.__state.append('in_row_def')
|
self.__state.append('in_row_def')
|
||||||
self.__last_cell_position = 0
|
self.__last_cell_position = 0
|
||||||
self.__row_dict = {}
|
self.__row_dict = {}
|
||||||
@ -192,7 +192,7 @@ class Table:
|
|||||||
self.__cell_widths = []
|
self.__cell_widths = []
|
||||||
|
|
||||||
def __start_table_func(self, line):
|
def __start_table_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line -- line to parse
|
line -- line to parse
|
||||||
Returns:
|
Returns:
|
||||||
@ -201,7 +201,7 @@ class Table:
|
|||||||
Add the 'in_table' to the state list.
|
Add the 'in_table' to the state list.
|
||||||
Write out the table marker.
|
Write out the table marker.
|
||||||
Initialize table values (not sure about these yet)
|
Initialize table values (not sure about these yet)
|
||||||
"""
|
'''
|
||||||
self.__rows_in_table = 0
|
self.__rows_in_table = 0
|
||||||
self.__cells_in_table = 0
|
self.__cells_in_table = 0
|
||||||
self.__cells_in_row = 0
|
self.__cells_in_row = 0
|
||||||
@ -223,7 +223,7 @@ class Table:
|
|||||||
self.__close_table(self, line)
|
self.__close_table(self, line)
|
||||||
|
|
||||||
def __end_row_def_func(self, line):
|
def __end_row_def_func(self, line):
|
||||||
"""
|
'''
|
||||||
Requires:
|
Requires:
|
||||||
line --just for consistency
|
line --just for consistency
|
||||||
Returns:
|
Returns:
|
||||||
@ -233,7 +233,7 @@ class Table:
|
|||||||
get rid of the last {} in the cell list
|
get rid of the last {} in the cell list
|
||||||
figure out the number of cells based on the self.__row_dict[widths]
|
figure out the number of cells based on the self.__row_dict[widths]
|
||||||
('122, 122')
|
('122, 122')
|
||||||
"""
|
'''
|
||||||
if len(self.__state) > 0:
|
if len(self.__state) > 0:
|
||||||
if self.__state[-1] == 'in_row_def':
|
if self.__state[-1] == 'in_row_def':
|
||||||
self.__state.pop()
|
self.__state.pop()
|
||||||
@ -330,7 +330,7 @@ class Table:
|
|||||||
self.__row_dict['header'] = 'true'
|
self.__row_dict['header'] = 'true'
|
||||||
|
|
||||||
def __start_cell_func(self, line):
|
def __start_cell_func(self, line):
|
||||||
"""
|
'''
|
||||||
Required:
|
Required:
|
||||||
line -- the line of text
|
line -- the line of text
|
||||||
Returns:
|
Returns:
|
||||||
@ -341,7 +341,7 @@ class Table:
|
|||||||
Write value => attributes for key=> value
|
Write value => attributes for key=> value
|
||||||
pop the self.__cell_list.
|
pop the self.__cell_list.
|
||||||
Otherwise, print out a cell tag.
|
Otherwise, print out a cell tag.
|
||||||
"""
|
'''
|
||||||
self.__state.append('in_cell')
|
self.__state.append('in_cell')
|
||||||
# self.__cell_list = []
|
# self.__cell_list = []
|
||||||
if len(self.__cell_list) > 0:
|
if len(self.__cell_list) > 0:
|
||||||
@ -361,7 +361,7 @@ class Table:
|
|||||||
self.__cells_in_row += 1
|
self.__cells_in_row += 1
|
||||||
|
|
||||||
def __start_row_func(self, line):
|
def __start_row_func(self, line):
|
||||||
"""
|
'''
|
||||||
Required:
|
Required:
|
||||||
line -- the line of text
|
line -- the line of text
|
||||||
Returns:
|
Returns:
|
||||||
@ -369,7 +369,7 @@ class Table:
|
|||||||
Logic:
|
Logic:
|
||||||
Append 'in_row' for states
|
Append 'in_row' for states
|
||||||
Write value => attributes for key=> value
|
Write value => attributes for key=> value
|
||||||
"""
|
'''
|
||||||
self.__state.append('in_row')
|
self.__state.append('in_row')
|
||||||
self.__write_obj.write('mi<tg<open-att__<row')
|
self.__write_obj.write('mi<tg<open-att__<row')
|
||||||
keys = self.__row_dict.keys()
|
keys = self.__row_dict.keys()
|
||||||
|
@ -34,7 +34,7 @@ class TableInfo:
|
|||||||
table_data,
|
table_data,
|
||||||
copy=None,
|
copy=None,
|
||||||
run_level=1,):
|
run_level=1,):
|
||||||
"""
|
'''
|
||||||
Required:
|
Required:
|
||||||
'file'--file to parse
|
'file'--file to parse
|
||||||
'table_data' -- a dictionary for each table.
|
'table_data' -- a dictionary for each table.
|
||||||
@ -44,7 +44,7 @@ class TableInfo:
|
|||||||
directory from which the script is run.)
|
directory from which the script is run.)
|
||||||
Returns:
|
Returns:
|
||||||
nothing
|
nothing
|
||||||
"""
|
'''
|
||||||
self.__file = in_file
|
self.__file = in_file
|
||||||
self.__bug_handler = bug_handler
|
self.__bug_handler = bug_handler
|
||||||
self.__copy = copy
|
self.__copy = copy
|
||||||
|
@@ -76,7 +76,7 @@ def _normalize_newlines(string):


 def getimagesize(url):
-"""
+'''
 Attempts to determine an image's width and height, and returns a string
 suitable for use in an <img> tag, or None in case of failure.
 Requires that PIL is installed.
@@ -85,7 +85,7 @@ def getimagesize(url):
 ... #doctest: +ELLIPSIS, +SKIP
 'width="..." height="..."'

-"""
+'''

 from PIL import ImageFile

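The docstring above states the contract of getimagesize: given a URL, determine the image's width and height and return a string usable inside an <img> tag, with PIL as a prerequisite. A rough sketch of how that can be done with PIL's incremental ImageFile.Parser (an illustration under those assumptions, not the body of this function):

from urllib.request import urlopen
from PIL import ImageFile

def image_size_attrs(url, max_bytes=8192):
    '''Return 'width="W" height="H"' for url, or None on failure (sketch).'''
    try:
        with urlopen(url) as resp:
            parser = ImageFile.Parser()
            read = 0
            while read < max_bytes:
                chunk = resp.read(1024)
                if not chunk:
                    break
                read += len(chunk)
                parser.feed(chunk)
                # Once the header has been parsed, the dimensions are known.
                if parser.image is not None:
                    width, height = parser.image.size
                    return f'width="{width}" height="{height}"'
    except Exception:
        return None
    return None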
@@ -274,11 +274,11 @@ class Textile:
 self.html_type = 'xhtml'

 def textile(self, text, rel=None, head_offset=0, html_type='xhtml'):
-"""
+'''
 >>> import textile
 >>> textile.textile('some textile')
 u'\\t<p>some textile</p>'
-"""
+'''
 self.html_type = html_type

 # text = type(u'')(text)
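The doctest in this hunk already pins down the basic contract of Textile.textile(): Textile markup in, tab-indented XHTML out. A short usage sketch built only on what the doctests in this file claim (the import path is an assumption about this repository's layout):

from calibre.ebooks.textile import textile  # assumed import path

print(textile('h1. foobar baby'))
# Per the block() doctest later in this file: '\t<h1>foobar baby</h1>'

print(textile('|one|two|three|\n|a|b|c|'))
# Per the table() doctest: a nested <table>/<tr>/<td> structure.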
@ -298,7 +298,7 @@ class Textile:
|
|||||||
return text
|
return text
|
||||||
|
|
||||||
def pba(self, input, element=None):
|
def pba(self, input, element=None):
|
||||||
"""
|
'''
|
||||||
Parse block attributes.
|
Parse block attributes.
|
||||||
|
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
@ -338,7 +338,7 @@ class Textile:
|
|||||||
>>> t.pba('[fr]')
|
>>> t.pba('[fr]')
|
||||||
' lang="fr"'
|
' lang="fr"'
|
||||||
|
|
||||||
"""
|
'''
|
||||||
style = []
|
style = []
|
||||||
aclass = ''
|
aclass = ''
|
||||||
lang = ''
|
lang = ''
|
||||||
@@ -420,7 +420,7 @@ class Textile:
 return ''.join(result)

 def hasRawText(self, text):
-"""
+'''
 checks whether the text has text not already enclosed by a block tag

 >>> t = Textile()
@@ -430,17 +430,17 @@ class Textile:
 >>> t.hasRawText(' why yes, yes it does')
 True

-"""
+'''
 r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip()
 r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
 return '' != r

 def table(self, text):
-r"""
+r'''
 >>> t = Textile()
 >>> t.table('|one|two|three|\n|a|b|c|')
 '\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>\n\n'
-"""
+'''
 text = text + '\n\n'
 pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {'s':self.s, 'a':self.a, 'c':self.c}, re.S|re.M|re.U)
 return pattern.sub(self.fTable, text)
@ -476,11 +476,11 @@ class Textile:
|
|||||||
return '\t<table{}>\n{}\n\t</table>\n\n'.format(tatts, '\n'.join(rows))
|
return '\t<table{}>\n{}\n\t</table>\n\n'.format(tatts, '\n'.join(rows))
|
||||||
|
|
||||||
def lists(self, text):
|
def lists(self, text):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> t.lists("* one\\n* two\\n* three")
|
>>> t.lists("* one\\n* two\\n* three")
|
||||||
'\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
|
'\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
|
||||||
"""
|
'''
|
||||||
pattern = re.compile(r'^([#*]+%s .*)$(?![^#*])' % self.c, re.U|re.M|re.S)
|
pattern = re.compile(r'^([#*]+%s .*)$(?![^#*])' % self.c, re.U|re.M|re.S)
|
||||||
return pattern.sub(self.fList, text)
|
return pattern.sub(self.fList, text)
|
||||||
|
|
||||||
@ -537,11 +537,11 @@ class Textile:
|
|||||||
return f'<{match.group(1)}{match.group(2)}>{content}{match.group(4)}'
|
return f'<{match.group(1)}{match.group(2)}>{content}{match.group(4)}'
|
||||||
|
|
||||||
def block(self, text, head_offset=0):
|
def block(self, text, head_offset=0):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> t.block('h1. foobar baby')
|
>>> t.block('h1. foobar baby')
|
||||||
'\\t<h1>foobar baby</h1>'
|
'\\t<h1>foobar baby</h1>'
|
||||||
"""
|
'''
|
||||||
if not self.lite:
|
if not self.lite:
|
||||||
tre = '|'.join(self.btag)
|
tre = '|'.join(self.btag)
|
||||||
else:
|
else:
|
||||||
@ -612,7 +612,7 @@ class Textile:
|
|||||||
return '\n\n'.join(out)
|
return '\n\n'.join(out)
|
||||||
|
|
||||||
def fBlock(self, tag, atts, ext, cite, content):
|
def fBlock(self, tag, atts, ext, cite, content):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
|
>>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
|
||||||
('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
|
('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
|
||||||
@ -625,7 +625,7 @@ class Textile:
|
|||||||
|
|
||||||
>>> t.fBlock("h1", "", None, "", "foobar")
|
>>> t.fBlock("h1", "", None, "", "foobar")
|
||||||
('', '\\t<h1>', 'foobar', '</h1>', '')
|
('', '\\t<h1>', 'foobar', '</h1>', '')
|
||||||
"""
|
'''
|
||||||
atts = self.pba(atts)
|
atts = self.pba(atts)
|
||||||
o1 = o2 = c2 = c1 = ''
|
o1 = o2 = c2 = c1 = ''
|
||||||
|
|
||||||
@ -678,11 +678,11 @@ class Textile:
|
|||||||
return o1, o2, content, c2, c1
|
return o1, o2, content, c2, c1
|
||||||
|
|
||||||
def footnoteRef(self, text):
|
def footnoteRef(self, text):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
|
>>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
|
||||||
'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
|
'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
|
||||||
"""
|
'''
|
||||||
return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
|
return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
|
||||||
|
|
||||||
def footnoteID(self, match):
|
def footnoteID(self, match):
|
||||||
@ -695,7 +695,7 @@ class Textile:
|
|||||||
return f'<sup class="footnote"><a href="#fn{fnid}">{id}</a></sup>{t}'
|
return f'<sup class="footnote"><a href="#fn{fnid}">{id}</a></sup>{t}'
|
||||||
|
|
||||||
def glyphs(self, text):
|
def glyphs(self, text):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
|
|
||||||
>>> t.glyphs("apostrophe's")
|
>>> t.glyphs("apostrophe's")
|
||||||
@ -716,7 +716,7 @@ class Textile:
|
|||||||
>>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
|
>>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
|
||||||
'<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
|
'<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
|
||||||
|
|
||||||
"""
|
'''
|
||||||
# fix: hackish
|
# fix: hackish
|
||||||
text = re.sub(r'"\Z', '" ', text)
|
text = re.sub(r'"\Z', '" ', text)
|
||||||
|
|
||||||
@ -798,12 +798,12 @@ class Textile:
|
|||||||
return id
|
return id
|
||||||
|
|
||||||
def retrieve(self, text):
|
def retrieve(self, text):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> id = t.shelve("foobar")
|
>>> id = t.shelve("foobar")
|
||||||
>>> t.retrieve(id)
|
>>> t.retrieve(id)
|
||||||
'foobar'
|
'foobar'
|
||||||
"""
|
'''
|
||||||
while True:
|
while True:
|
||||||
old = text
|
old = text
|
||||||
for k, v in self.shelf.items():
|
for k, v in self.shelf.items():
|
||||||
@ -850,11 +850,11 @@ class Textile:
|
|||||||
return text.rstrip('\n')
|
return text.rstrip('\n')
|
||||||
|
|
||||||
def links(self, text):
|
def links(self, text):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
|
>>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
|
||||||
'fooobar ... and hello world ...'
|
'fooobar ... and hello world ...'
|
||||||
"""
|
'''
|
||||||
|
|
||||||
text = self.macros_only(text)
|
text = self.macros_only(text)
|
||||||
punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'
|
punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'
|
||||||
@ -906,11 +906,11 @@ class Textile:
|
|||||||
return ''.join([pre, out, post])
|
return ''.join([pre, out, post])
|
||||||
|
|
||||||
def span(self, text):
|
def span(self, text):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
|
>>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
|
||||||
'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
|
'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
|
||||||
"""
|
'''
|
||||||
qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
|
qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
|
||||||
pnct = ".,\"'?!;:"
|
pnct = ".,\"'?!;:"
|
||||||
|
|
||||||
@ -955,11 +955,11 @@ class Textile:
|
|||||||
return out
|
return out
|
||||||
|
|
||||||
def image(self, text):
|
def image(self, text):
|
||||||
"""
|
'''
|
||||||
>>> t = Textile()
|
>>> t = Textile()
|
||||||
>>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
|
>>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
|
||||||
'<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
|
'<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
|
||||||
"""
|
'''
|
||||||
pattern = re.compile(r'''
|
pattern = re.compile(r'''
|
||||||
(?:[\[{])? # pre
|
(?:[\[{])? # pre
|
||||||
\! # opening !
|
\! # opening !
|
||||||
@ -1058,17 +1058,17 @@ class Textile:
|
|||||||
|
|
||||||
|
|
||||||
def textile(text, head_offset=0, html_type='xhtml', encoding=None, output=None):
|
def textile(text, head_offset=0, html_type='xhtml', encoding=None, output=None):
|
||||||
"""
|
'''
|
||||||
this function takes additional parameters:
|
this function takes additional parameters:
|
||||||
head_offset - offset to apply to heading levels (default: 0)
|
head_offset - offset to apply to heading levels (default: 0)
|
||||||
html_type - 'xhtml' or 'html' style tags (default: 'xhtml')
|
html_type - 'xhtml' or 'html' style tags (default: 'xhtml')
|
||||||
"""
|
'''
|
||||||
return Textile().textile(text, head_offset=head_offset,
|
return Textile().textile(text, head_offset=head_offset,
|
||||||
html_type=html_type)
|
html_type=html_type)
|
||||||
|
|
||||||
|
|
||||||
def textile_restricted(text, lite=True, noimage=True, html_type='xhtml'):
|
def textile_restricted(text, lite=True, noimage=True, html_type='xhtml'):
|
||||||
"""
|
'''
|
||||||
Restricted version of Textile designed for weblog comments and other
|
Restricted version of Textile designed for weblog comments and other
|
||||||
untrusted input.
|
untrusted input.
|
||||||
|
|
||||||
@ -1083,7 +1083,7 @@ def textile_restricted(text, lite=True, noimage=True, html_type='xhtml'):
|
|||||||
When noimage=True is set (the default):
|
When noimage=True is set (the default):
|
||||||
Image tags are disabled.
|
Image tags are disabled.
|
||||||
|
|
||||||
"""
|
'''
|
||||||
return Textile(restricted=True, lite=lite,
|
return Textile(restricted=True, lite=lite,
|
||||||
noimage=noimage).textile(text, rel='nofollow',
|
noimage=noimage).textile(text, rel='nofollow',
|
||||||
html_type=html_type)
|
html_type=html_type)
|
||||||
|
@@ -186,7 +186,7 @@ class MarkdownHighlighter(QSyntaxHighlighter):
 prev = prevBlock.text()
 prevAscii = str(prev.replace('\u2029','\n'))
 if self.offset == 0 and prevAscii.strip():
-#print "Its a header"
+#print 'Its a header'
 prevCursor.select(QTextCursor.SelectionType.LineUnderCursor)
 #prevCursor.setCharFormat(self.MARKDOWN_KWS_FORMAT['Header'])
 formatRange = QTextLayout.FormatRange()
@ -841,7 +841,7 @@ class CreateCustomColumn(QDialog):
|
|||||||
|
|
||||||
|
|
||||||
class CreateNewCustomColumn:
|
class CreateNewCustomColumn:
|
||||||
"""
|
'''
|
||||||
Provide an API to create new custom columns.
|
Provide an API to create new custom columns.
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
@ -942,7 +942,7 @@ class CreateNewCustomColumn:
|
|||||||
The method returns Result.MUST_RESTART if further calibre configuration has
|
The method returns Result.MUST_RESTART if further calibre configuration has
|
||||||
been blocked. You can check for this situation in advance by calling
|
been blocked. You can check for this situation in advance by calling
|
||||||
must_restart().
|
must_restart().
|
||||||
"""
|
'''
|
||||||
|
|
||||||
class Result(Enum):
|
class Result(Enum):
|
||||||
COLUMN_ADDED = 0
|
COLUMN_ADDED = 0
|
||||||
@ -1058,7 +1058,7 @@ class CreateNewCustomColumn:
|
|||||||
return (self.Result.CANCELED, _('Canceled'))
|
return (self.Result.CANCELED, _('Canceled'))
|
||||||
|
|
||||||
def current_columns(self):
|
def current_columns(self):
|
||||||
"""
|
'''
|
||||||
Return the currently defined custom columns
|
Return the currently defined custom columns
|
||||||
|
|
||||||
Return the currently defined custom columns including the ones that haven't
|
Return the currently defined custom columns including the ones that haven't
|
||||||
@ -1075,20 +1075,20 @@ class CreateNewCustomColumn:
|
|||||||
Columns that already exist will have additional attributes that this class
|
Columns that already exist will have additional attributes that this class
|
||||||
doesn't use. See calibre.library.field_metadata.add_custom_field() for the
|
doesn't use. See calibre.library.field_metadata.add_custom_field() for the
|
||||||
complete list.
|
complete list.
|
||||||
"""
|
'''
|
||||||
# deepcopy to prevent users from changing it. The new MappingProxyType
|
# deepcopy to prevent users from changing it. The new MappingProxyType
|
||||||
# isn't enough because only the top-level dict is immutable, not the
|
# isn't enough because only the top-level dict is immutable, not the
|
||||||
# items in the dict.
|
# items in the dict.
|
||||||
return copy.deepcopy(self.custcols)
|
return copy.deepcopy(self.custcols)
|
||||||
|
|
||||||
def current_headings(self):
|
def current_headings(self):
|
||||||
"""
|
'''
|
||||||
Return the currently defined column headings
|
Return the currently defined column headings
|
||||||
|
|
||||||
Return the column headings including the ones that haven't yet been
|
Return the column headings including the ones that haven't yet been
|
||||||
created. It is a dict. The key is the heading, the value is the lookup
|
created. It is a dict. The key is the heading, the value is the lookup
|
||||||
name having that heading.
|
name having that heading.
|
||||||
"""
|
'''
|
||||||
return {v['name']:('#' + v['label']) for v in self.custcols.values()}
|
return {v['name']:('#' + v['label']) for v in self.custcols.values()}
|
||||||
|
|
||||||
def must_restart(self):
|
def must_restart(self):
|
||||||
|
@ -207,7 +207,7 @@ class CatalogBuilder:
|
|||||||
''' key() functions '''
|
''' key() functions '''
|
||||||
|
|
||||||
def _kf_author_to_author_sort(self, author):
|
def _kf_author_to_author_sort(self, author):
|
||||||
""" Compute author_sort value from author
|
''' Compute author_sort value from author
|
||||||
|
|
||||||
Tokenize author string, return capitalized string with last token first
|
Tokenize author string, return capitalized string with last token first
|
||||||
|
|
||||||
@ -216,7 +216,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Return:
|
Return:
|
||||||
(str): 'Smith, john'
|
(str): 'Smith, john'
|
||||||
"""
|
'''
|
||||||
tokens = author.split()
|
tokens = author.split()
|
||||||
tokens = tokens[-1:] + tokens[:-1]
|
tokens = tokens[-1:] + tokens[:-1]
|
||||||
if len(tokens) > 1:
|
if len(tokens) > 1:
|
||||||
@ -224,7 +224,7 @@ class CatalogBuilder:
|
|||||||
return ' '.join(tokens).capitalize()
|
return ' '.join(tokens).capitalize()
|
||||||
|
|
||||||
def _kf_books_by_author_sorter_author(self, book):
|
def _kf_books_by_author_sorter_author(self, book):
|
||||||
""" Generate book sort key with computed author_sort.
|
''' Generate book sort key with computed author_sort.
|
||||||
|
|
||||||
Generate a sort key of computed author_sort, title. Used to look for
|
Generate a sort key of computed author_sort, title. Used to look for
|
||||||
author_sort mismatches.
|
author_sort mismatches.
|
||||||
@ -237,7 +237,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Return:
|
Return:
|
||||||
(str): sort key
|
(str): sort key
|
||||||
"""
|
'''
|
||||||
if not book['series']:
|
if not book['series']:
|
||||||
key = '{} {}'.format(self._kf_author_to_author_sort(book['author']),
|
key = '{} {}'.format(self._kf_author_to_author_sort(book['author']),
|
||||||
capitalize(book['title_sort']))
|
capitalize(book['title_sort']))
|
||||||
@ -460,7 +460,7 @@ class CatalogBuilder:
|
|||||||
(self.thumbs_path, float(cached_thumb_width)))
|
(self.thumbs_path, float(cached_thumb_width)))
|
||||||
|
|
||||||
def convert_html_entities(self, s):
|
def convert_html_entities(self, s):
|
||||||
""" Convert string containing HTML entities to its unicode equivalent.
|
''' Convert string containing HTML entities to its unicode equivalent.
|
||||||
|
|
||||||
Convert a string containing HTML entities of the form '&' or '&97;'
|
Convert a string containing HTML entities of the form '&' or '&97;'
|
||||||
to a normalized unicode string. E.g., 'AT&T' converted to 'AT&T'.
|
to a normalized unicode string. E.g., 'AT&T' converted to 'AT&T'.
|
||||||
@ -470,7 +470,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Return:
|
Return:
|
||||||
s (str): converted string
|
s (str): converted string
|
||||||
"""
|
'''
|
||||||
return replace_entities(s)
|
return replace_entities(s)
|
||||||
|
|
||||||
def copy_catalog_resources(self):
|
def copy_catalog_resources(self):
|
||||||
@ -884,7 +884,7 @@ class CatalogBuilder:
|
|||||||
raise EmptyCatalogException(error_msg)
|
raise EmptyCatalogException(error_msg)
|
||||||
|
|
||||||
def fetch_books_to_catalog(self):
|
def fetch_books_to_catalog(self):
|
||||||
""" Populate self.books_to_catalog from database
|
''' Populate self.books_to_catalog from database
|
||||||
|
|
||||||
Create self.books_to_catalog from filtered database.
|
Create self.books_to_catalog from filtered database.
|
||||||
Keys:
|
Keys:
|
||||||
@ -917,7 +917,7 @@ class CatalogBuilder:
|
|||||||
Returns:
|
Returns:
|
||||||
True: Successful
|
True: Successful
|
||||||
False: Empty data, (check filter restrictions)
|
False: Empty data, (check filter restrictions)
|
||||||
"""
|
'''
|
||||||
|
|
||||||
def _populate_title(record):
|
def _populate_title(record):
|
||||||
''' populate this_title with massaged metadata '''
|
''' populate this_title with massaged metadata '''
|
||||||
@ -1334,7 +1334,7 @@ class CatalogBuilder:
|
|||||||
return tag_list
|
return tag_list
|
||||||
|
|
||||||
def format_ncx_text(self, description, dest=None):
|
def format_ncx_text(self, description, dest=None):
|
||||||
""" Massage NCX text for Kindle.
|
''' Massage NCX text for Kindle.
|
||||||
|
|
||||||
Convert HTML entities for proper display on Kindle, convert
|
Convert HTML entities for proper display on Kindle, convert
|
||||||
'&' to '&' (Kindle fails).
|
'&' to '&' (Kindle fails).
|
||||||
@ -1345,7 +1345,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Return:
|
Return:
|
||||||
(str): massaged, possibly truncated description
|
(str): massaged, possibly truncated description
|
||||||
"""
|
'''
|
||||||
# Kindle TOC descriptions won't render certain characters
|
# Kindle TOC descriptions won't render certain characters
|
||||||
# Fix up
|
# Fix up
|
||||||
massaged = xml_replace_entities(str(description))
|
massaged = xml_replace_entities(str(description))
|
||||||
@ -1388,7 +1388,7 @@ class CatalogBuilder:
|
|||||||
return re.sub(r'\W', '', ascii_text(author))
|
return re.sub(r'\W', '', ascii_text(author))
|
||||||
|
|
||||||
def generate_format_args(self, book):
|
def generate_format_args(self, book):
|
||||||
""" Generate the format args for template substitution.
|
''' Generate the format args for template substitution.
|
||||||
|
|
||||||
self.load_section_templates imports string formatting templates of the form
|
self.load_section_templates imports string formatting templates of the form
|
||||||
'by_*_template.py' for use in the various sections. The templates are designed to use
|
'by_*_template.py' for use in the various sections. The templates are designed to use
|
||||||
@ -1399,7 +1399,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Return:
|
Return:
|
||||||
(dict): formatted args for templating
|
(dict): formatted args for templating
|
||||||
"""
|
'''
|
||||||
series_index = str(book['series_index'])
|
series_index = str(book['series_index'])
|
||||||
if series_index.endswith('.0'):
|
if series_index.endswith('.0'):
|
||||||
series_index = series_index[:-2]
|
series_index = series_index[:-2]
|
||||||
@@ -2032,7 +2032,7 @@ class CatalogBuilder:
 bookmarked_books = []
 for bm_book in self.bookmarked_books:
 book = self.bookmarked_books[bm_book]
-# print "bm_book: %s" % bm_book
+# print 'bm_book: %s' % bm_book
 book[1]['bookmark_timestamp'] = book[0].timestamp
 try:
 book[1]['percent_read'] = min(float(100 * book[0].last_read / book[0].book_length), 100)
@@ -2139,7 +2139,7 @@ class CatalogBuilder:
 master_genre_list = []
 for genre_tag_set in genre_list:
 for (index, genre) in enumerate(genre_tag_set):
-# print "genre: %s \t genre_tag_set[genre]: %s" % (genre, genre_tag_set[genre])
+# print 'genre: %s \t genre_tag_set[genre]: %s' % (genre, genre_tag_set[genre])

 # Create sorted_authors[0] = friendly, [1] = author_sort for NCX creation
 authors = []
@@ -3396,7 +3396,7 @@ class CatalogBuilder:
 for book in self.books_by_date_range:
 book_time = datetime.datetime(book['timestamp'].year, book['timestamp'].month, book['timestamp'].day)
 if (today_time - book_time).days <= date_range_limit:
-# print "generate_ncx_by_date_added: %s added %d days ago" % (book['title'], (today_time-book_time).days)
+# print 'generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days)
 current_titles_list.append(book['title'])
 else:
 break
@@ -3505,7 +3505,7 @@ class CatalogBuilder:
 for book in self.bookmarked_books_by_date_read:
 bookmark_time = utcfromtimestamp(book['bookmark_timestamp'])
 if (today_time - bookmark_time).days <= date_range_limit:
-# print "generate_ncx_by_date_added: %s added %d days ago" % (book['title'], (today_time-book_time).days)
+# print 'generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days)
 current_titles_list.append(book['title'])
 else:
 break
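Both hunks above filter books the same way: truncate the timestamp to date resolution, then keep titles whose age in days is within date_range_limit, stopping at the first title that falls outside the window. A compact sketch of that cutoff logic (hypothetical helper, same arithmetic as the lines shown):

import datetime

def titles_added_within(books, date_range_limit):
    '''Return titles whose 'timestamp' is at most date_range_limit days old (sketch).'''
    now = datetime.datetime.now()
    today_time = datetime.datetime(now.year, now.month, now.day)
    current_titles_list = []
    for book in books:  # assumed sorted newest first, as in the catalog code
        ts = book['timestamp']
        book_time = datetime.datetime(ts.year, ts.month, ts.day)
        if (today_time - book_time).days <= date_range_limit:
            current_titles_list.append(book['title'])
        else:
            break  # everything after this is older still
    return current_titles_list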
@ -3609,7 +3609,7 @@ class CatalogBuilder:
|
|||||||
self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
|
self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
|
||||||
|
|
||||||
def generate_opf(self):
|
def generate_opf(self):
|
||||||
""" Generate the OPF file.
|
''' Generate the OPF file.
|
||||||
|
|
||||||
Start with header template, construct manifest, spine and guide.
|
Start with header template, construct manifest, spine and guide.
|
||||||
|
|
||||||
@ -3624,7 +3624,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Outputs:
|
Outputs:
|
||||||
opts.basename + '.opf' (file): written
|
opts.basename + '.opf' (file): written
|
||||||
"""
|
'''
|
||||||
|
|
||||||
self.update_progress_full_step(_('Generating OPF'))
|
self.update_progress_full_step(_('Generating OPF'))
|
||||||
lang = get_lang() or 'en'
|
lang = get_lang() or 'en'
|
||||||
@ -3798,7 +3798,7 @@ class CatalogBuilder:
|
|||||||
raise RuntimeError
|
raise RuntimeError
|
||||||
|
|
||||||
def generate_sort_title(self, title):
|
def generate_sort_title(self, title):
|
||||||
""" Generates a sort string from title.
|
''' Generates a sort string from title.
|
||||||
|
|
||||||
Based on trunk title_sort algorithm, but also accommodates series
|
Based on trunk title_sort algorithm, but also accommodates series
|
||||||
numbers by padding with leading zeroes to force proper numeric
|
numbers by padding with leading zeroes to force proper numeric
|
||||||
@ -3810,7 +3810,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Return:
|
Return:
|
||||||
(str): sort string
|
(str): sort string
|
||||||
"""
|
'''
|
||||||
|
|
||||||
from calibre.ebooks.metadata import title_sort
|
from calibre.ebooks.metadata import title_sort
|
||||||
from calibre.library.catalogs.utils import NumberToText
|
from calibre.library.catalogs.utils import NumberToText
|
||||||
@ -4077,7 +4077,7 @@ class CatalogBuilder:
|
|||||||
return profile
|
return profile
|
||||||
|
|
||||||
def get_prefix_rules(self):
|
def get_prefix_rules(self):
|
||||||
""" Convert opts.prefix_rules to dict.
|
''' Convert opts.prefix_rules to dict.
|
||||||
|
|
||||||
Convert opts.prefix_rules to dict format. The model for a prefix rule is
|
Convert opts.prefix_rules to dict format. The model for a prefix rule is
|
||||||
('<rule name>','<#source_field_lookup>','<pattern>','<prefix>')
|
('<rule name>','<#source_field_lookup>','<pattern>','<prefix>')
|
||||||
@ -4087,7 +4087,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Return:
|
Return:
|
||||||
(list): list of prefix_rules dicts
|
(list): list of prefix_rules dicts
|
||||||
"""
|
'''
|
||||||
pr = []
|
pr = []
|
||||||
if self.opts.prefix_rules:
|
if self.opts.prefix_rules:
|
||||||
try:
|
try:
|
||||||
@ -4309,7 +4309,7 @@ class CatalogBuilder:
|
|||||||
return books_by_author
|
return books_by_author
|
||||||
|
|
||||||
def update_progress_full_step(self, description):
|
def update_progress_full_step(self, description):
|
||||||
""" Update calibre's job status UI.
|
''' Update calibre's job status UI.
|
||||||
|
|
||||||
Call ProgessReporter() with updates.
|
Call ProgessReporter() with updates.
|
||||||
|
|
||||||
@ -4318,7 +4318,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Result:
|
Result:
|
||||||
(UI): Jobs UI updated
|
(UI): Jobs UI updated
|
||||||
"""
|
'''
|
||||||
|
|
||||||
self.current_step += 1
|
self.current_step += 1
|
||||||
self.progress_string = description
|
self.progress_string = description
|
||||||
@ -4336,7 +4336,7 @@ class CatalogBuilder:
|
|||||||
self.opts.log(log_msg)
|
self.opts.log(log_msg)
|
||||||
|
|
||||||
def update_progress_micro_step(self, description, micro_step_pct):
|
def update_progress_micro_step(self, description, micro_step_pct):
|
||||||
""" Update calibre's job status UI.
|
''' Update calibre's job status UI.
|
||||||
|
|
||||||
Called from steps requiring more time:
|
Called from steps requiring more time:
|
||||||
generate_html_descriptions()
|
generate_html_descriptions()
|
||||||
@ -4348,7 +4348,7 @@ class CatalogBuilder:
|
|||||||
|
|
||||||
Results:
|
Results:
|
||||||
(UI): Jobs UI updated
|
(UI): Jobs UI updated
|
||||||
"""
|
'''
|
||||||
|
|
||||||
step_range = 100 / self.total_steps
|
step_range = 100 / self.total_steps
|
||||||
self.progress_string = description
|
self.progress_string = description
|
||||||
|
@@ -24,7 +24,7 @@ HTTP_METHODS = {'HEAD', 'GET', 'PUT', 'POST', 'TRACE', 'DELETE', 'OPTIONS'}


 def parse_request_uri(uri):
-"""Parse a Request-URI into (scheme, authority, path).
+'''Parse a Request-URI into (scheme, authority, path).

 Note that Request-URI's must be one of::

@@ -42,7 +42,7 @@ def parse_request_uri(uri):
 path_segments = segment *( "/" segment )
 segment = *pchar *( ";" param )
 param = *pchar
-"""
+'''
 if uri == b'*':
 return None, None, uri

@@ -1,4 +1,4 @@
-""" Collection of python utility-methodes commonly used by other
+''' Collection of python utility-methodes commonly used by other
 bibliograph packages.
 From http://pypi.python.org/pypi/bibliograph.core/
 from Tom Gross <itconsense@gmail.com>
@@ -58,7 +58,7 @@
 OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 DAMAGE.
-"""
+'''

 __author__ = 'sengian <sengian1 at gmail.com>'
 __docformat__ = 'restructuredtext en'
@ -337,7 +337,7 @@ class FFMLProcessor:
|
|||||||
return self.tree_to_html(tree, 0)
|
return self.tree_to_html(tree, 0)
|
||||||
|
|
||||||
def document_to_summary_html(self, document, name, safe=True):
|
def document_to_summary_html(self, document, name, safe=True):
|
||||||
"""
|
'''
|
||||||
Given a document in the Formatter Function Markup Language (FFML), return
|
Given a document in the Formatter Function Markup Language (FFML), return
|
||||||
that document's summary in HTML format.
|
that document's summary in HTML format.
|
||||||
|
|
||||||
@ -349,7 +349,7 @@ class FFMLProcessor:
|
|||||||
|
|
||||||
:return: a string containing the HTML
|
:return: a string containing the HTML
|
||||||
|
|
||||||
"""
|
'''
|
||||||
document = document.strip()
|
document = document.strip()
|
||||||
sum_tag = document.find('[/]')
|
sum_tag = document.find('[/]')
|
||||||
if sum_tag > 0:
|
if sum_tag > 0:
|
||||||
@ -453,7 +453,7 @@ class FFMLProcessor:
|
|||||||
return doc
|
return doc
|
||||||
|
|
||||||
def document_to_summary_rst(self, document, name, indent=0, prefix=None, safe=True):
|
def document_to_summary_rst(self, document, name, indent=0, prefix=None, safe=True):
|
||||||
"""
|
'''
|
||||||
Given a document in the Formatter Function Markup Language (FFML), return
|
Given a document in the Formatter Function Markup Language (FFML), return
|
||||||
that document's summary in RST (sphinx reStructuredText) format.
|
that document's summary in RST (sphinx reStructuredText) format.
|
||||||
|
|
||||||
@ -471,7 +471,7 @@ class FFMLProcessor:
|
|||||||
|
|
||||||
:return: a string containing the RST text
|
:return: a string containing the RST text
|
||||||
|
|
||||||
"""
|
'''
|
||||||
document = document.strip()
|
document = document.strip()
|
||||||
sum_tag = document.find('[/]')
|
sum_tag = document.find('[/]')
|
||||||
if sum_tag > 0:
|
if sum_tag > 0:
|
||||||
|
@@ -175,7 +175,7 @@ def png(h):

 @test
 def gif(h):
-"""GIF ('87 and '89 variants)"""
+'''GIF ('87 and '89 variants)'''
 if h[:6] in (b'GIF87a', b'GIF89a'):
 return 'gif'

@@ -526,14 +526,14 @@ def smartyPants(text, attr='1'):


 def educateQuotes(text):
-"""
+'''
 Parameter: String.

 Returns: The string, with "educated" curly quote HTML entities.

 Example input: "Isn't this fun?"
 Example output: “Isn’t this fun?”
-"""
+'''

 punct_class = r'''[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]'''

@@ -572,12 +572,12 @@ def educateQuotes(text):
 # meaningful

 # Special case for Quotes at end of line with a preceding space (may change just to end of line)
-# text = re.sub(r"""(?<=\s)"$""", r"""”""", text)
-# text = re.sub(r"""(?<=\s)'$""", r"""’""", text)
+# text = re.sub(r'''(?<=\s)"$''', r'''”''', text)
+# text = re.sub(r'''(?<=\s)'$''', r'''’''', text)

 # Special case for Quotes at beginning of line with a space - multiparagraph quoted text:
-# text = re.sub(r"""^"(?=\s)""", r"""“""", text)
-# text = re.sub(r"""^'(?=\s)""", r"""‘""", text)
+# text = re.sub(r'''^"(?=\s)''', r'''“''', text)
+# text = re.sub(r'''^'(?=\s)''', r'''‘''', text)

 close_class = r'''[^\ \t\r\n\[\{\(\-]'''
 dec_dashes = r'''–|—'''
@@ -654,13 +654,13 @@ def educateQuotes(text):
|
|||||||
|
|
||||||
|
|
||||||
def educateBackticks(text):
|
def educateBackticks(text):
|
||||||
"""
|
'''
|
||||||
Parameter: String.
|
Parameter: String.
|
||||||
Returns: The string, with ``backticks'' -style double quotes
|
Returns: The string, with ``backticks'' -style double quotes
|
||||||
translated into HTML curly quote entities.
|
translated into HTML curly quote entities.
|
||||||
Example input: ``Isn't this fun?''
|
Example input: ``Isn't this fun?''
|
||||||
Example output: “Isn't this fun?”
|
Example output: “Isn't this fun?”
|
||||||
"""
|
'''
|
||||||
|
|
||||||
text = re.sub(r'''``''', r'''“''', text)
|
text = re.sub(r'''``''', r'''“''', text)
|
||||||
text = re.sub(r"""''""", r'''”''', text)
|
text = re.sub(r"""''""", r'''”''', text)
|
||||||
@@ -668,14 +668,14 @@ def educateBackticks(text):
|
|||||||
|
|
||||||
|
|
||||||
def educateSingleBackticks(text):
|
def educateSingleBackticks(text):
|
||||||
"""
|
'''
|
||||||
Parameter: String.
|
Parameter: String.
|
||||||
Returns: The string, with `backticks' -style single quotes
|
Returns: The string, with `backticks' -style single quotes
|
||||||
translated into HTML curly quote entities.
|
translated into HTML curly quote entities.
|
||||||
|
|
||||||
Example input: `Isn't this fun?'
|
Example input: `Isn't this fun?'
|
||||||
Example output: ‘Isn’t this fun?’
|
Example output: ‘Isn’t this fun?’
|
||||||
"""
|
'''
|
||||||
|
|
||||||
text = re.sub(r'''`''', r'''‘''', text)
|
text = re.sub(r'''`''', r'''‘''', text)
|
||||||
text = re.sub(r"""'""", r'''’''', text)
|
text = re.sub(r"""'""", r'''’''', text)
|
||||||
@@ -710,7 +710,7 @@ def educateDashesOldSchool(text):
|
|||||||
|
|
||||||
|
|
||||||
def educateDashesOldSchoolInverted(text):
|
def educateDashesOldSchoolInverted(text):
|
||||||
"""
|
'''
|
||||||
Parameter: String.
|
Parameter: String.
|
||||||
|
|
||||||
Returns: The string, with each instance of "--" translated to
|
Returns: The string, with each instance of "--" translated to
|
||||||
@@ -723,7 +723,7 @@ def educateDashesOldSchoolInverted(text):
|
|||||||
common than en-dashes, and so it sort of makes sense that
|
common than en-dashes, and so it sort of makes sense that
|
||||||
the shortcut should be shorter to type. (Thanks to Aaron
|
the shortcut should be shorter to type. (Thanks to Aaron
|
||||||
Swartz for the idea.)
|
Swartz for the idea.)
|
||||||
"""
|
'''
|
||||||
text = re.sub(r'''---''', r'''–''', text) # em
|
text = re.sub(r'''---''', r'''–''', text) # em
|
||||||
text = re.sub(r'''--''', r'''—''', text) # en
|
text = re.sub(r'''--''', r'''—''', text) # en
|
||||||
return text
|
return text
|
||||||
@@ -769,7 +769,7 @@ def stupefyEntities(text):
|
|||||||
|
|
||||||
|
|
||||||
def processEscapes(text):
|
def processEscapes(text):
|
||||||
r"""
|
r'''
|
||||||
Parameter: String.
|
Parameter: String.
|
||||||
Returns: The string, with after processing the following backslash
|
Returns: The string, with after processing the following backslash
|
||||||
escape sequences. This is useful if you want to force a "dumb"
|
escape sequences. This is useful if you want to force a "dumb"
|
||||||
@@ -783,7 +783,7 @@ def processEscapes(text):
|
|||||||
\. .
|
\. .
|
||||||
\- -
|
\- -
|
||||||
\` `
|
\` `
|
||||||
"""
|
'''
|
||||||
text = re.sub(r'''\\\\''', r'''\''', text)
|
text = re.sub(r'''\\\\''', r'''\''', text)
|
||||||
text = re.sub(r'''\\"''', r'''"''', text)
|
text = re.sub(r'''\\"''', r'''"''', text)
|
||||||
text = re.sub(r"""\\'""", r''''''', text)
|
text = re.sub(r"""\\'""", r''''''', text)
|
||||||
@@ -795,7 +795,7 @@ def processEscapes(text):
|
|||||||
|
|
||||||
|
|
||||||
def _tokenize(html):
|
def _tokenize(html):
|
||||||
"""
|
'''
|
||||||
Parameter: String containing HTML markup.
|
Parameter: String containing HTML markup.
|
||||||
Returns: Reference to an array of the tokens comprising the input
|
Returns: Reference to an array of the tokens comprising the input
|
||||||
string. Each token is either a tag (possibly with nested,
|
string. Each token is either a tag (possibly with nested,
|
||||||
@@ -806,15 +806,15 @@ def _tokenize(html):
|
|||||||
|
|
||||||
Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
|
Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
|
||||||
<http://www.bradchoate.com/past/mtregex.php>
|
<http://www.bradchoate.com/past/mtregex.php>
|
||||||
"""
|
'''
|
||||||
|
|
||||||
tokens = []
|
tokens = []
|
||||||
|
|
||||||
# depth = 6
|
# depth = 6
|
||||||
# nested_tags = "|".join(['(?:<(?:[^<>]',] * depth) + (')*>)' * depth)
|
# nested_tags = "|".join(['(?:<(?:[^<>]',] * depth) + (')*>)' * depth)
|
||||||
# match = r"""(?: <! ( -- .*? -- \s* )+ > ) | # comments
|
# match = r'''(?: <! ( -- .*? -- \s* )+ > ) | # comments
|
||||||
# (?: <\? .*? \?> ) | # directives
|
# (?: <\? .*? \?> ) | # directives
|
||||||
# %s # nested tags """ % (nested_tags,)
|
# %s # nested tags ''' % (nested_tags,)
|
||||||
tag_soup = re.compile(r'''([^<]*)(<[^>]*>)''')
|
tag_soup = re.compile(r'''([^<]*)(<[^>]*>)''')
|
||||||
|
|
||||||
token_match = tag_soup.search(html)
|
token_match = tag_soup.search(html)
|
||||||
|
@@ -82,13 +82,13 @@ class SMTPServerDisconnected(SMTPException):
|
|||||||
|
|
||||||
|
|
||||||
class SMTPResponseException(SMTPException):
|
class SMTPResponseException(SMTPException):
|
||||||
"""Base class for all exceptions that include an SMTP error code.
|
'''Base class for all exceptions that include an SMTP error code.
|
||||||
|
|
||||||
These exceptions are generated in some instances when the SMTP
|
These exceptions are generated in some instances when the SMTP
|
||||||
server returns an error code. The error code is stored in the
|
server returns an error code. The error code is stored in the
|
||||||
`smtp_code' attribute of the error, and the `smtp_error' attribute
|
`smtp_code' attribute of the error, and the `smtp_error' attribute
|
||||||
is set to the error message.
|
is set to the error message.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
def __init__(self, code, msg):
|
def __init__(self, code, msg):
|
||||||
self.smtp_code = code
|
self.smtp_code = code
|
||||||
@@ -97,11 +97,11 @@ class SMTPResponseException(SMTPException):
|
|||||||
|
|
||||||
|
|
||||||
class SMTPSenderRefused(SMTPResponseException):
|
class SMTPSenderRefused(SMTPResponseException):
|
||||||
"""Sender address refused.
|
'''Sender address refused.
|
||||||
|
|
||||||
In addition to the attributes set by on all SMTPResponseException
|
In addition to the attributes set by on all SMTPResponseException
|
||||||
exceptions, this sets `sender' to the string that the SMTP refused.
|
exceptions, this sets `sender' to the string that the SMTP refused.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
def __init__(self, code, msg, sender):
|
def __init__(self, code, msg, sender):
|
||||||
self.smtp_code = code
|
self.smtp_code = code
|
||||||
@@ -111,12 +111,12 @@ class SMTPSenderRefused(SMTPResponseException):
|
|||||||
|
|
||||||
|
|
||||||
class SMTPRecipientsRefused(SMTPException):
|
class SMTPRecipientsRefused(SMTPException):
|
||||||
"""All recipient addresses refused.
|
'''All recipient addresses refused.
|
||||||
|
|
||||||
The errors for each recipient are accessible through the attribute
|
The errors for each recipient are accessible through the attribute
|
||||||
'recipients', which is a dictionary of exactly the same sort as
|
'recipients', which is a dictionary of exactly the same sort as
|
||||||
SMTP.sendmail() returns.
|
SMTP.sendmail() returns.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
def __init__(self, recipients):
|
def __init__(self, recipients):
|
||||||
self.recipients = recipients
|
self.recipients = recipients
|
||||||
@@ -124,7 +124,7 @@ class SMTPRecipientsRefused(SMTPException):
|
|||||||
|
|
||||||
|
|
||||||
class SMTPDataError(SMTPResponseException):
|
class SMTPDataError(SMTPResponseException):
|
||||||
"""The SMTP server didn't accept the data."""
|
'''The SMTP server didn't accept the data.'''
|
||||||
|
|
||||||
|
|
||||||
class SMTPConnectError(SMTPResponseException):
|
class SMTPConnectError(SMTPResponseException):
|
||||||
@@ -136,11 +136,11 @@ class SMTPHeloError(SMTPResponseException):
|
|||||||
|
|
||||||
|
|
||||||
class SMTPAuthenticationError(SMTPResponseException):
|
class SMTPAuthenticationError(SMTPResponseException):
|
||||||
"""Authentication error.
|
'''Authentication error.
|
||||||
|
|
||||||
Most probably the server didn't accept the username/password
|
Most probably the server didn't accept the username/password
|
||||||
combination provided.
|
combination provided.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
|
|
||||||
def quoteaddr(addr):
|
def quoteaddr(addr):
|
||||||
@@ -172,11 +172,11 @@ def _addr_only(addrstring):
|
|||||||
|
|
||||||
|
|
||||||
def quotedata(data):
|
def quotedata(data):
|
||||||
"""Quote data for email.
|
'''Quote data for email.
|
||||||
|
|
||||||
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
|
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
|
||||||
Internet CRLF end-of-line.
|
Internet CRLF end-of-line.
|
||||||
"""
|
'''
|
||||||
return re.sub(r'(?m)^\.', '..',
|
return re.sub(r'(?m)^\.', '..',
|
||||||
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
|
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
|
||||||
|
|
||||||
@@ -216,7 +216,7 @@ else:
|
|||||||
|
|
||||||
|
|
||||||
class SMTP:
|
class SMTP:
|
||||||
"""This class manages a connection to an SMTP or ESMTP server.
|
'''This class manages a connection to an SMTP or ESMTP server.
|
||||||
SMTP Objects:
|
SMTP Objects:
|
||||||
SMTP objects have the following attributes:
|
SMTP objects have the following attributes:
|
||||||
helo_resp
|
helo_resp
|
||||||
@@ -243,7 +243,7 @@ class SMTP:
|
|||||||
See each method's docstrings for details. In general, there is a
|
See each method's docstrings for details. In general, there is a
|
||||||
method of the same name to perform each SMTP command. There is also a
|
method of the same name to perform each SMTP command. There is also a
|
||||||
method called 'sendmail' that will do an entire mail transaction.
|
method called 'sendmail' that will do an entire mail transaction.
|
||||||
"""
|
'''
|
||||||
debuglevel = 0
|
debuglevel = 0
|
||||||
file = None
|
file = None
|
||||||
helo_resp = None
|
helo_resp = None
|
||||||
@@ -255,7 +255,7 @@ class SMTP:
|
|||||||
def __init__(self, host='', port=0, local_hostname=None,
|
def __init__(self, host='', port=0, local_hostname=None,
|
||||||
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
|
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
|
||||||
debug_to=partial(print, file=stderr)):
|
debug_to=partial(print, file=stderr)):
|
||||||
"""Initialize a new instance.
|
'''Initialize a new instance.
|
||||||
|
|
||||||
If specified, `host' is the name of the remote host to which to
|
If specified, `host' is the name of the remote host to which to
|
||||||
connect. If specified, `port' specifies the port to which to connect.
|
connect. If specified, `port' specifies the port to which to connect.
|
||||||
@@ -266,7 +266,7 @@ class SMTP:
|
|||||||
specifies where debug output is written to. By default it is written to
|
specifies where debug output is written to. By default it is written to
|
||||||
sys.stderr. You should pass in a print function of your own to control
|
sys.stderr. You should pass in a print function of your own to control
|
||||||
where debug output is written.
|
where debug output is written.
|
||||||
"""
|
'''
|
||||||
self._host = host
|
self._host = host
|
||||||
self.timeout = timeout
|
self.timeout = timeout
|
||||||
self.debug = debug_to
|
self.debug = debug_to
|
||||||
@@ -312,7 +312,7 @@ class SMTP:
|
|||||||
return socket.create_connection((host, port), timeout)
|
return socket.create_connection((host, port), timeout)
|
||||||
|
|
||||||
def connect(self, host='localhost', port=0):
|
def connect(self, host='localhost', port=0):
|
||||||
"""Connect to a host on a given port.
|
'''Connect to a host on a given port.
|
||||||
|
|
||||||
If the hostname ends with a colon (`:') followed by a number, and
|
If the hostname ends with a colon (`:') followed by a number, and
|
||||||
there is no port specified, that suffix will be stripped off and the
|
there is no port specified, that suffix will be stripped off and the
|
||||||
@@ -321,7 +321,7 @@ class SMTP:
|
|||||||
Note: This method is automatically invoked by __init__, if a host is
|
Note: This method is automatically invoked by __init__, if a host is
|
||||||
specified during instantiation.
|
specified during instantiation.
|
||||||
|
|
||||||
"""
|
'''
|
||||||
if not port and (host.find(':') == host.rfind(':')):
|
if not port and (host.find(':') == host.rfind(':')):
|
||||||
i = host.rfind(':')
|
i = host.rfind(':')
|
||||||
if i >= 0:
|
if i >= 0:
|
||||||
@@ -342,7 +342,7 @@ class SMTP:
|
|||||||
return (code, msg)
|
return (code, msg)
|
||||||
|
|
||||||
def send(self, str):
|
def send(self, str):
|
||||||
"""Send `str' to the server."""
|
'''Send `str' to the server.'''
|
||||||
if self.debuglevel > 0:
|
if self.debuglevel > 0:
|
||||||
raw = repr(str)
|
raw = repr(str)
|
||||||
self.debug('send:', raw)
|
self.debug('send:', raw)
|
||||||
@@ -364,7 +364,7 @@ class SMTP:
|
|||||||
self.send(str)
|
self.send(str)
|
||||||
|
|
||||||
def getreply(self):
|
def getreply(self):
|
||||||
"""Get a reply from the server.
|
'''Get a reply from the server.
|
||||||
|
|
||||||
Returns a tuple consisting of:
|
Returns a tuple consisting of:
|
||||||
|
|
||||||
@@ -375,7 +375,7 @@ class SMTP:
|
|||||||
responses are converted to a single, multiline string).
|
responses are converted to a single, multiline string).
|
||||||
|
|
||||||
Raises SMTPServerDisconnected if end-of-file is reached.
|
Raises SMTPServerDisconnected if end-of-file is reached.
|
||||||
"""
|
'''
|
||||||
resp = []
|
resp = []
|
||||||
if self.file is None:
|
if self.file is None:
|
||||||
self.file = self.sock.makefile('rb')
|
self.file = self.sock.makefile('rb')
|
||||||
@@ -417,20 +417,20 @@ class SMTP:
|
|||||||
|
|
||||||
# std smtp commands
|
# std smtp commands
|
||||||
def helo(self, name=''):
|
def helo(self, name=''):
|
||||||
"""SMTP 'helo' command.
|
'''SMTP 'helo' command.
|
||||||
Hostname to send for this command defaults to the FQDN of the local
|
Hostname to send for this command defaults to the FQDN of the local
|
||||||
host.
|
host.
|
||||||
"""
|
'''
|
||||||
self.putcmd('helo', name or self.local_hostname)
|
self.putcmd('helo', name or self.local_hostname)
|
||||||
(code, msg) = self.getreply()
|
(code, msg) = self.getreply()
|
||||||
self.helo_resp = msg
|
self.helo_resp = msg
|
||||||
return (code, msg)
|
return (code, msg)
|
||||||
|
|
||||||
def ehlo(self, name=''):
|
def ehlo(self, name=''):
|
||||||
""" SMTP 'ehlo' command.
|
''' SMTP 'ehlo' command.
|
||||||
Hostname to send for this command defaults to the FQDN of the local
|
Hostname to send for this command defaults to the FQDN of the local
|
||||||
host.
|
host.
|
||||||
"""
|
'''
|
||||||
self.esmtp_features = {}
|
self.esmtp_features = {}
|
||||||
self.putcmd(self.ehlo_msg, name or self.local_hostname)
|
self.putcmd(self.ehlo_msg, name or self.local_hostname)
|
||||||
(code, msg) = self.getreply()
|
(code, msg) = self.getreply()
|
||||||
@@ -481,21 +481,21 @@ class SMTP:
|
|||||||
return opt.lower() in self.esmtp_features
|
return opt.lower() in self.esmtp_features
|
||||||
|
|
||||||
def help(self, args=''):
|
def help(self, args=''):
|
||||||
"""SMTP 'help' command.
|
'''SMTP 'help' command.
|
||||||
Returns help text from server."""
|
Returns help text from server.'''
|
||||||
self.putcmd('help', args)
|
self.putcmd('help', args)
|
||||||
return self.getreply()[1]
|
return self.getreply()[1]
|
||||||
|
|
||||||
def rset(self):
|
def rset(self):
|
||||||
"""SMTP 'rset' command -- resets session."""
|
'''SMTP 'rset' command -- resets session.'''
|
||||||
return self.docmd('rset')
|
return self.docmd('rset')
|
||||||
|
|
||||||
def noop(self):
|
def noop(self):
|
||||||
"""SMTP 'noop' command -- doesn't do anything :>"""
|
'''SMTP 'noop' command -- doesn't do anything :>'''
|
||||||
return self.docmd('noop')
|
return self.docmd('noop')
|
||||||
|
|
||||||
def mail(self, sender, options=[]):
|
def mail(self, sender, options=[]):
|
||||||
"""SMTP 'mail' command -- begins mail xfer session."""
|
'''SMTP 'mail' command -- begins mail xfer session.'''
|
||||||
optionlist = ''
|
optionlist = ''
|
||||||
if options and self.does_esmtp:
|
if options and self.does_esmtp:
|
||||||
optionlist = ' ' + ' '.join(options)
|
optionlist = ' ' + ' '.join(options)
|
||||||
@@ -503,7 +503,7 @@ class SMTP:
|
|||||||
return self.getreply()
|
return self.getreply()
|
||||||
|
|
||||||
def rcpt(self, recip, options=[]):
|
def rcpt(self, recip, options=[]):
|
||||||
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
|
'''SMTP 'rcpt' command -- indicates 1 recipient for this mail.'''
|
||||||
optionlist = ''
|
optionlist = ''
|
||||||
if options and self.does_esmtp:
|
if options and self.does_esmtp:
|
||||||
optionlist = ' ' + ' '.join(options)
|
optionlist = ' ' + ' '.join(options)
|
||||||
@@ -511,13 +511,13 @@ class SMTP:
|
|||||||
return self.getreply()
|
return self.getreply()
|
||||||
|
|
||||||
def data(self, msg):
|
def data(self, msg):
|
||||||
"""SMTP 'DATA' command -- sends message data to server.
|
'''SMTP 'DATA' command -- sends message data to server.
|
||||||
|
|
||||||
Automatically quotes lines beginning with a period per rfc821.
|
Automatically quotes lines beginning with a period per rfc821.
|
||||||
Raises SMTPDataError if there is an unexpected reply to the
|
Raises SMTPDataError if there is an unexpected reply to the
|
||||||
DATA command; the return value from this method is the final
|
DATA command; the return value from this method is the final
|
||||||
response code received when the all data is sent.
|
response code received when the all data is sent.
|
||||||
"""
|
'''
|
||||||
self.putcmd('data')
|
self.putcmd('data')
|
||||||
(code, repl) = self.getreply()
|
(code, repl) = self.getreply()
|
||||||
if self.debuglevel > 0:
|
if self.debuglevel > 0:
|
||||||
@@ -536,21 +536,21 @@ class SMTP:
|
|||||||
return (code, msg)
|
return (code, msg)
|
||||||
|
|
||||||
def verify(self, address):
|
def verify(self, address):
|
||||||
"""SMTP 'verify' command -- checks for address validity."""
|
'''SMTP 'verify' command -- checks for address validity.'''
|
||||||
self.putcmd('vrfy', _addr_only(address))
|
self.putcmd('vrfy', _addr_only(address))
|
||||||
return self.getreply()
|
return self.getreply()
|
||||||
# a.k.a.
|
# a.k.a.
|
||||||
vrfy = verify
|
vrfy = verify
|
||||||
|
|
||||||
def expn(self, address):
|
def expn(self, address):
|
||||||
"""SMTP 'expn' command -- expands a mailing list."""
|
'''SMTP 'expn' command -- expands a mailing list.'''
|
||||||
self.putcmd('expn', _addr_only(address))
|
self.putcmd('expn', _addr_only(address))
|
||||||
return self.getreply()
|
return self.getreply()
|
||||||
|
|
||||||
# some useful methods
|
# some useful methods
|
||||||
|
|
||||||
def ehlo_or_helo_if_needed(self):
|
def ehlo_or_helo_if_needed(self):
|
||||||
"""Call self.ehlo() and/or self.helo() if needed.
|
'''Call self.ehlo() and/or self.helo() if needed.
|
||||||
|
|
||||||
If there has been no previous EHLO or HELO command this session, this
|
If there has been no previous EHLO or HELO command this session, this
|
||||||
method tries ESMTP EHLO first.
|
method tries ESMTP EHLO first.
|
||||||
@@ -559,7 +559,7 @@ class SMTP:
|
|||||||
|
|
||||||
SMTPHeloError The server didn't reply properly to
|
SMTPHeloError The server didn't reply properly to
|
||||||
the helo greeting.
|
the helo greeting.
|
||||||
"""
|
'''
|
||||||
if self.helo_resp is None and self.ehlo_resp is None:
|
if self.helo_resp is None and self.ehlo_resp is None:
|
||||||
if not (200 <= self.ehlo()[0] <= 299):
|
if not (200 <= self.ehlo()[0] <= 299):
|
||||||
(code, resp) = self.helo()
|
(code, resp) = self.helo()
|
||||||
@@ -567,7 +567,7 @@ class SMTP:
|
|||||||
raise SMTPHeloError(code, resp)
|
raise SMTPHeloError(code, resp)
|
||||||
|
|
||||||
def login(self, user, password):
|
def login(self, user, password):
|
||||||
"""Log in on an SMTP server that requires authentication.
|
'''Log in on an SMTP server that requires authentication.
|
||||||
|
|
||||||
The arguments are:
|
The arguments are:
|
||||||
- user: The user name to authenticate with.
|
- user: The user name to authenticate with.
|
||||||
@@ -586,7 +586,7 @@ class SMTP:
|
|||||||
password combination.
|
password combination.
|
||||||
SMTPException No suitable authentication method was
|
SMTPException No suitable authentication method was
|
||||||
found.
|
found.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
def encode_cram_md5(challenge, user, password):
|
def encode_cram_md5(challenge, user, password):
|
||||||
challenge = base64.decodestring(challenge)
|
challenge = base64.decodestring(challenge)
|
||||||
@@ -646,7 +646,7 @@ class SMTP:
|
|||||||
return (code, resp)
|
return (code, resp)
|
||||||
|
|
||||||
def starttls(self, context=None):
|
def starttls(self, context=None):
|
||||||
"""Puts the connection to the SMTP server into TLS mode.
|
'''Puts the connection to the SMTP server into TLS mode.
|
||||||
|
|
||||||
If there has been no previous EHLO or HELO command this session, this
|
If there has been no previous EHLO or HELO command this session, this
|
||||||
method tries ESMTP EHLO first.
|
method tries ESMTP EHLO first.
|
||||||
@@ -661,7 +661,7 @@ class SMTP:
|
|||||||
|
|
||||||
SMTPHeloError The server didn't reply properly to
|
SMTPHeloError The server didn't reply properly to
|
||||||
the helo greeting.
|
the helo greeting.
|
||||||
"""
|
'''
|
||||||
self.ehlo_or_helo_if_needed()
|
self.ehlo_or_helo_if_needed()
|
||||||
if not self.has_extn('starttls'):
|
if not self.has_extn('starttls'):
|
||||||
raise SMTPException('STARTTLS extension not supported by server.')
|
raise SMTPException('STARTTLS extension not supported by server.')
|
||||||
@@ -744,8 +744,8 @@ class SMTP:
|
|||||||
of the four addresses, and one was rejected, with the error code
|
of the four addresses, and one was rejected, with the error code
|
||||||
550. If all addresses are accepted, then the method will return an
|
550. If all addresses are accepted, then the method will return an
|
||||||
empty dictionary.
|
empty dictionary.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
self.ehlo_or_helo_if_needed()
|
self.ehlo_or_helo_if_needed()
|
||||||
esmtp_opts = []
|
esmtp_opts = []
|
||||||
if self.does_esmtp:
|
if self.does_esmtp:
|
||||||
@@ -805,13 +805,13 @@ class SMTP:
|
|||||||
if _have_ssl:
|
if _have_ssl:
|
||||||
|
|
||||||
class SMTP_SSL(SMTP):
|
class SMTP_SSL(SMTP):
|
||||||
""" This is a subclass derived from SMTP that connects over an SSL encrypted
|
''' This is a subclass derived from SMTP that connects over an SSL encrypted
|
||||||
socket (to use this class you need a socket module that was compiled with SSL
|
socket (to use this class you need a socket module that was compiled with SSL
|
||||||
support). If host is not specified, '' (the local host) is used. If port is
|
support). If host is not specified, '' (the local host) is used. If port is
|
||||||
omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
|
omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
|
||||||
are also optional - they can contain a PEM formatted private key and
|
are also optional - they can contain a PEM formatted private key and
|
||||||
certificate chain file for the SSL connection.
|
certificate chain file for the SSL connection.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
default_port = SMTP_SSL_PORT
|
default_port = SMTP_SSL_PORT
|
||||||
|
|
||||||
@@ -841,7 +841,7 @@ LMTP_PORT = 2003
|
|||||||
|
|
||||||
|
|
||||||
class LMTP(SMTP):
|
class LMTP(SMTP):
|
||||||
"""LMTP - Local Mail Transfer Protocol
|
'''LMTP - Local Mail Transfer Protocol
|
||||||
|
|
||||||
The LMTP protocol, which is very similar to ESMTP, is heavily based
|
The LMTP protocol, which is very similar to ESMTP, is heavily based
|
||||||
on the standard SMTP client. It's common to use Unix sockets for LMTP,
|
on the standard SMTP client. It's common to use Unix sockets for LMTP,
|
||||||
@@ -851,7 +851,7 @@ class LMTP(SMTP):
|
|||||||
|
|
||||||
Authentication is supported, using the regular SMTP mechanism. When
|
Authentication is supported, using the regular SMTP mechanism. When
|
||||||
using a Unix socket, LMTP generally don't support or require any
|
using a Unix socket, LMTP generally don't support or require any
|
||||||
authentication, but your mileage might vary."""
|
authentication, but your mileage might vary.'''
|
||||||
|
|
||||||
ehlo_msg = 'lhlo'
|
ehlo_msg = 'lhlo'
|
||||||
|
|
||||||
|
@@ -1,4 +1,4 @@
|
|||||||
"""Easy to use object-oriented thread pool framework.
|
'''Easy to use object-oriented thread pool framework.
|
||||||
|
|
||||||
A thread pool is an object that maintains a pool of worker threads to perform
|
A thread pool is an object that maintains a pool of worker threads to perform
|
||||||
time consuming operations in parallel. It assigns jobs to the threads
|
time consuming operations in parallel. It assigns jobs to the threads
|
||||||
@@ -28,7 +28,7 @@ Basic usage:
|
|||||||
See the end of the module code for a brief, annotated usage example.
|
See the end of the module code for a brief, annotated usage example.
|
||||||
|
|
||||||
Website : http://chrisarndt.de/en/software/python/threadpool/
|
Website : http://chrisarndt.de/en/software/python/threadpool/
|
||||||
"""
|
'''
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
'makeRequests',
|
'makeRequests',
|
||||||
@@ -239,7 +239,7 @@ class ThreadPool:
|
|||||||
|
|
||||||
|
|
||||||
def makeRequests(callable, args_list, callback=None, exc_callback=None):
|
def makeRequests(callable, args_list, callback=None, exc_callback=None):
|
||||||
"""Create several work requests for same callable with different arguments.
|
'''Create several work requests for same callable with different arguments.
|
||||||
|
|
||||||
Convenience function for creating several work requests for the same
|
Convenience function for creating several work requests for the same
|
||||||
callable where each invocation of the callable receives different values
|
callable where each invocation of the callable receives different values
|
||||||
@@ -251,7 +251,7 @@ def makeRequests(callable, args_list, callback=None, exc_callback=None):
|
|||||||
non-tuple argument.
|
non-tuple argument.
|
||||||
|
|
||||||
See docstring for WorkRequest for info on callback and exc_callback.
|
See docstring for WorkRequest for info on callback and exc_callback.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
requests = []
|
requests = []
|
||||||
for item in args_list:
|
for item in args_list:
|
||||||
|
@@ -43,7 +43,7 @@ def lang():
|
|||||||
|
|
||||||
|
|
||||||
def titlecase(text):
|
def titlecase(text):
|
||||||
"""
|
'''
|
||||||
Titlecases input text
|
Titlecases input text
|
||||||
|
|
||||||
This filter changes all words to Title Caps, and attempts to be clever
|
This filter changes all words to Title Caps, and attempts to be clever
|
||||||
@@ -52,7 +52,7 @@ def titlecase(text):
|
|||||||
The list of "SMALL words" which are not capped comes from
|
The list of "SMALL words" which are not capped comes from
|
||||||
the New York Times Manual of Style, plus 'vs' and 'v'.
|
the New York Times Manual of Style, plus 'vs' and 'v'.
|
||||||
|
|
||||||
"""
|
'''
|
||||||
|
|
||||||
all_caps = icu_upper(text) == text
|
all_caps = icu_upper(text) == text
|
||||||
|
|
||||||
|
@@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
|
||||||
"""
|
'''
|
||||||
Get word, character, and Asian character counts
|
Get word, character, and Asian character counts
|
||||||
|
|
||||||
1. Get a word count as a dictionary:
|
1. Get a word count as a dictionary:
|
||||||
@@ -22,7 +22,7 @@ properties counted:
|
|||||||
Sourced from:
|
Sourced from:
|
||||||
http://ginstrom.com/scribbles/2008/05/17/counting-words-etc-in-an-html-file-with-python/
|
http://ginstrom.com/scribbles/2008/05/17/counting-words-etc-in-an-html-file-with-python/
|
||||||
http://ginstrom.com/scribbles/2007/10/06/counting-words-characters-and-asian-characters-with-python/
|
http://ginstrom.com/scribbles/2007/10/06/counting-words-characters-and-asian-characters-with-python/
|
||||||
"""
|
'''
|
||||||
__version__ = 0.1
|
__version__ = 0.1
|
||||||
__author__ = 'Ryan Ginstrom'
|
__author__ = 'Ryan Ginstrom'
|
||||||
|
|
||||||
|
@@ -913,8 +913,8 @@ class ZipFile:
|
|||||||
self.write(filename, arcname, compress_type)
|
self.write(filename, arcname, compress_type)
|
||||||
|
|
||||||
def replacestr(self, zinfo, byts):
|
def replacestr(self, zinfo, byts):
|
||||||
"""Delete zinfo.filename, and write a new file into the archive. The
|
'''Delete zinfo.filename, and write a new file into the archive. The
|
||||||
contents is the string 'bytes'."""
|
contents is the string 'bytes'.'''
|
||||||
self.delete(zinfo.filename)
|
self.delete(zinfo.filename)
|
||||||
self.writestr(zinfo, byts)
|
self.writestr(zinfo, byts)
|
||||||
|
|
||||||
@@ -988,7 +988,7 @@ class ZipFile:
|
|||||||
return zinfo.filename
|
return zinfo.filename
|
||||||
|
|
||||||
def getinfo(self, name):
|
def getinfo(self, name):
|
||||||
"""Return the instance of ZipInfo given 'name'."""
|
'''Return the instance of ZipInfo given 'name'.'''
|
||||||
info = self.NameToInfo.get(name)
|
info = self.NameToInfo.get(name)
|
||||||
if info is None:
|
if info is None:
|
||||||
raise KeyError(
|
raise KeyError(
|
||||||
@@ -1010,7 +1010,7 @@ class ZipFile:
|
|||||||
return zef.read_raw()
|
return zef.read_raw()
|
||||||
|
|
||||||
def open(self, name, mode='r', pwd=None):
|
def open(self, name, mode='r', pwd=None):
|
||||||
"""Return file-like object for 'name'."""
|
'''Return file-like object for 'name'.'''
|
||||||
if mode not in ('r', 'U', 'rU'):
|
if mode not in ('r', 'U', 'rU'):
|
||||||
raise RuntimeError('open() requires mode "r", "U", or "rU"')
|
raise RuntimeError('open() requires mode "r", "U", or "rU"')
|
||||||
if not self.fp:
|
if not self.fp:
|
||||||
@@ -1083,11 +1083,11 @@ class ZipFile:
|
|||||||
return ZipExtFile(zef_file, mode, zinfo, zd)
|
return ZipExtFile(zef_file, mode, zinfo, zd)
|
||||||
|
|
||||||
def extract(self, member, path=None, pwd=None):
|
def extract(self, member, path=None, pwd=None):
|
||||||
"""Extract a member from the archive to the current working directory,
|
'''Extract a member from the archive to the current working directory,
|
||||||
using its full name. Its file information is extracted as accurately
|
using its full name. Its file information is extracted as accurately
|
||||||
as possible. `member' may be a filename or a ZipInfo object. You can
|
as possible. `member' may be a filename or a ZipInfo object. You can
|
||||||
specify a different directory using `path'.
|
specify a different directory using `path'.
|
||||||
"""
|
'''
|
||||||
if not isinstance(member, ZipInfo):
|
if not isinstance(member, ZipInfo):
|
||||||
member = self.getinfo(member)
|
member = self.getinfo(member)
|
||||||
|
|
||||||
@@ -1097,11 +1097,11 @@ class ZipFile:
|
|||||||
return self._extract_member(member, path, pwd)
|
return self._extract_member(member, path, pwd)
|
||||||
|
|
||||||
def extractall(self, path=None, members=None, pwd=None):
|
def extractall(self, path=None, members=None, pwd=None):
|
||||||
"""Extract all members from the archive to the current working
|
'''Extract all members from the archive to the current working
|
||||||
directory. `path' specifies a different directory to extract to.
|
directory. `path' specifies a different directory to extract to.
|
||||||
`members' is optional and must be a subset of the list returned
|
`members' is optional and must be a subset of the list returned
|
||||||
by namelist().
|
by namelist().
|
||||||
"""
|
'''
|
||||||
if members is None:
|
if members is None:
|
||||||
members = self.namelist()
|
members = self.namelist()
|
||||||
|
|
||||||
@@ -1113,9 +1113,9 @@ class ZipFile:
|
|||||||
self.extract(zipinfo, path, pwd)
|
self.extract(zipinfo, path, pwd)
|
||||||
|
|
||||||
def _extract_member(self, member, targetpath, pwd):
|
def _extract_member(self, member, targetpath, pwd):
|
||||||
"""Extract the ZipInfo object 'member' to a physical
|
'''Extract the ZipInfo object 'member' to a physical
|
||||||
file on the path targetpath.
|
file on the path targetpath.
|
||||||
"""
|
'''
|
||||||
# build the destination pathname, replacing
|
# build the destination pathname, replacing
|
||||||
# forward slashes to platform specific separators.
|
# forward slashes to platform specific separators.
|
||||||
# Strip trailing path separator, unless it represents the root.
|
# Strip trailing path separator, unless it represents the root.
|
||||||
@@ -1293,9 +1293,9 @@ class ZipFile:
|
|||||||
|
|
||||||
def writestr(self, zinfo_or_arcname, byts, permissions=0o600,
|
def writestr(self, zinfo_or_arcname, byts, permissions=0o600,
|
||||||
compression=ZIP_DEFLATED, raw_bytes=False):
|
compression=ZIP_DEFLATED, raw_bytes=False):
|
||||||
"""Write a file into the archive. The contents is the string
|
'''Write a file into the archive. The contents is the string
|
||||||
'byts'. 'zinfo_or_arcname' is either a ZipInfo instance or
|
'byts'. 'zinfo_or_arcname' is either a ZipInfo instance or
|
||||||
the name of the file in the archive."""
|
the name of the file in the archive.'''
|
||||||
assert not raw_bytes or (raw_bytes and
|
assert not raw_bytes or (raw_bytes and
|
||||||
isinstance(zinfo_or_arcname, ZipInfo))
|
isinstance(zinfo_or_arcname, ZipInfo))
|
||||||
if not isinstance(byts, bytes):
|
if not isinstance(byts, bytes):
|
||||||
|
@@ -330,7 +330,7 @@ class TouchscreenFeedTemplate(Template):
|
|||||||
navbar_t.append(navbar_tr)
|
navbar_t.append(navbar_tr)
|
||||||
top_navbar = navbar_t
|
top_navbar = navbar_t
|
||||||
bottom_navbar = copy.copy(navbar_t)
|
bottom_navbar = copy.copy(navbar_t)
|
||||||
# print "\n%s\n" % etree.tostring(navbar_t, pretty_print=True)
|
# print '\n%s\n' % etree.tostring(navbar_t, pretty_print=True)
|
||||||
|
|
||||||
# Build the page
|
# Build the page
|
||||||
head = HEAD(TITLE(feed.title))
|
head = HEAD(TITLE(feed.title))
|
||||||
@@ -423,6 +423,6 @@ class TouchscreenNavBarTemplate(Template):
|
|||||||
navbar_tr.append(TD(attrs('article_next'),link))
|
navbar_tr.append(TD(attrs('article_next'),link))
|
||||||
navbar_t.append(navbar_tr)
|
navbar_t.append(navbar_tr)
|
||||||
navbar.append(navbar_t)
|
navbar.append(navbar_t)
|
||||||
# print "\n%s\n" % etree.tostring(navbar, pretty_print=True)
|
# print '\n%s\n' % etree.tostring(navbar, pretty_print=True)
|
||||||
|
|
||||||
self.root = HTML(head, BODY(navbar))
|
self.root = HTML(head, BODY(navbar))
|
||||||
|
@@ -209,9 +209,9 @@ def cnv_namespacedToken(attribute, arg, element):
|
|||||||
|
|
||||||
|
|
||||||
def cnv_NCName(attribute, arg, element):
|
def cnv_NCName(attribute, arg, element):
|
||||||
""" NCName is defined in http://www.w3.org/TR/REC-xml-names/#NT-NCName
|
''' NCName is defined in http://www.w3.org/TR/REC-xml-names/#NT-NCName
|
||||||
Essentially an XML name minus ':'
|
Essentially an XML name minus ':'
|
||||||
"""
|
'''
|
||||||
if isinstance(arg, string_or_bytes):
|
if isinstance(arg, string_or_bytes):
|
||||||
return make_NCName(arg)
|
return make_NCName(arg)
|
||||||
else:
|
else:
|
||||||
|
@@ -279,12 +279,12 @@ class CDATASection(Text, Childless):
|
|||||||
|
|
||||||
|
|
||||||
class Element(Node):
|
class Element(Node):
|
||||||
""" Creates a arbitrary element and is intended to be subclassed not used on its own.
|
''' Creates a arbitrary element and is intended to be subclassed not used on its own.
|
||||||
This element is the base of every element it defines a class which resembles
|
This element is the base of every element it defines a class which resembles
|
||||||
a xml-element. The main advantage of this kind of implementation is that you don't
|
a xml-element. The main advantage of this kind of implementation is that you don't
|
||||||
have to create a toXML method for every different object. Every element
|
have to create a toXML method for every different object. Every element
|
||||||
consists of an attribute, optional subelements, optional text and optional cdata.
|
consists of an attribute, optional subelements, optional text and optional cdata.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
nodeType = Node.ELEMENT_NODE
|
nodeType = Node.ELEMENT_NODE
|
||||||
namespaces = {} # Due to shallow copy this is a static variable
|
namespaces = {} # Due to shallow copy this is a static variable
|
||||||
@@ -438,12 +438,12 @@ class Element(Node):
|
|||||||
self.setAttrNS(allowed_attrs[i][0], allowed_attrs[i][1], value)
|
self.setAttrNS(allowed_attrs[i][0], allowed_attrs[i][1], value)
|
||||||
|
|
||||||
def setAttrNS(self, namespace, localpart, value):
|
def setAttrNS(self, namespace, localpart, value):
|
||||||
""" Add an attribute to the element
|
''' Add an attribute to the element
|
||||||
In case you need to add an attribute the library doesn't know about
|
In case you need to add an attribute the library doesn't know about
|
||||||
then you must provide the full qualified name
|
then you must provide the full qualified name
|
||||||
It will not check that the attribute is legal according to the schema.
|
It will not check that the attribute is legal according to the schema.
|
||||||
Must overwrite, If attribute already exists.
|
Must overwrite, If attribute already exists.
|
||||||
"""
|
'''
|
||||||
c = AttrConverters()
|
c = AttrConverters()
|
||||||
self.attributes[(namespace, localpart)] = c.convert((namespace, localpart), value, self)
|
self.attributes[(namespace, localpart)] = c.convert((namespace, localpart), value, self)
|
||||||
|
|
||||||
|
@@ -24,7 +24,7 @@ CHARTNS = 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0'
|
|||||||
CHARTOOONS = 'http://openoffice.org/2010/chart'
|
CHARTOOONS = 'http://openoffice.org/2010/chart'
|
||||||
CONFIGNS = 'urn:oasis:names:tc:opendocument:xmlns:config:1.0'
|
CONFIGNS = 'urn:oasis:names:tc:opendocument:xmlns:config:1.0'
|
||||||
CSS3TNS = 'http://www.w3.org/TR/css3-text/'
|
CSS3TNS = 'http://www.w3.org/TR/css3-text/'
|
||||||
# DBNS = u"http://openoffice.org/2004/database"
|
# DBNS = u'http://openoffice.org/2004/database'
|
||||||
DBNS = 'urn:oasis:names:tc:opendocument:xmlns:database:1.0'
|
DBNS = 'urn:oasis:names:tc:opendocument:xmlns:database:1.0'
|
||||||
DCNS = 'http://purl.org/dc/elements/1.1/'
|
DCNS = 'http://purl.org/dc/elements/1.1/'
|
||||||
DOMNS = 'http://www.w3.org/2001/xml-events'
|
DOMNS = 'http://www.w3.org/2001/xml-events'
|
||||||
|
@@ -182,7 +182,7 @@ class ODF2MoinMoin:
|
|||||||
def processFontDeclarations(self, fontDecl):
|
def processFontDeclarations(self, fontDecl):
|
||||||
''' Extracts necessary font information from a font-declaration
|
''' Extracts necessary font information from a font-declaration
|
||||||
element.
|
element.
|
||||||
'''
|
'''
|
||||||
for fontFace in fontDecl.getElementsByTagName('style:font-face'):
|
for fontFace in fontDecl.getElementsByTagName('style:font-face'):
|
||||||
if fontFace.getAttribute('style:font-pitch') == 'fixed':
|
if fontFace.getAttribute('style:font-pitch') == 'fixed':
|
||||||
self.fixedFonts.append(fontFace.getAttribute('style:name'))
|
self.fixedFonts.append(fontFace.getAttribute('style:name'))
|
||||||
|
@@ -134,13 +134,13 @@ class StyleToCSS:
|
|||||||
}
|
}
|
||||||
|
|
||||||
def save_font(self, name, family, generic):
|
def save_font(self, name, family, generic):
|
||||||
""" It is possible that the HTML browser doesn't know how to
|
''' It is possible that the HTML browser doesn't know how to
|
||||||
show a particular font. Fortunately ODF provides generic fallbacks.
|
show a particular font. Fortunately ODF provides generic fallbacks.
|
||||||
Unfortunately they are not the same as CSS2.
|
Unfortunately they are not the same as CSS2.
|
||||||
CSS2: serif, sans-serif, cursive, fantasy, monospace
|
CSS2: serif, sans-serif, cursive, fantasy, monospace
|
||||||
ODF: roman, swiss, modern, decorative, script, system
|
ODF: roman, swiss, modern, decorative, script, system
|
||||||
This method put the font and fallback into a dictionary
|
This method put the font and fallback into a dictionary
|
||||||
"""
|
'''
|
||||||
htmlgeneric = 'sans-serif'
|
htmlgeneric = 'sans-serif'
|
||||||
if generic == 'roman':
|
if generic == 'roman':
|
||||||
htmlgeneric = 'serif'
|
htmlgeneric = 'serif'
|
||||||
@@ -157,10 +157,10 @@ class StyleToCSS:
|
|||||||
self.fontdict[name] = (family, htmlgeneric)
|
self.fontdict[name] = (family, htmlgeneric)
|
||||||
|
|
||||||
def c_drawfillimage(self, ruleset, sdict, rule, val):
|
def c_drawfillimage(self, ruleset, sdict, rule, val):
|
||||||
""" Fill a figure with an image. Since CSS doesn't let you resize images
|
''' Fill a figure with an image. Since CSS doesn't let you resize images
|
||||||
this should really be implemented as an absolutely position <img>
|
this should really be implemented as an absolutely position <img>
|
||||||
with a width and a height
|
with a width and a height
|
||||||
"""
|
'''
|
||||||
sdict['background-image'] = "url('%s')" % self.fillimages[val]
|
sdict['background-image'] = "url('%s')" % self.fillimages[val]
|
||||||
|
|
||||||
def c_fo(self, ruleset, sdict, rule, val):
|
def c_fo(self, ruleset, sdict, rule, val):
|
||||||
@@ -278,22 +278,22 @@ class StyleToCSS:
|
|||||||
sdict['left'] = ruleset[(SVGNS,'x')]
|
sdict['left'] = ruleset[(SVGNS,'x')]
|
||||||
|
|
||||||
def c_page_width(self, ruleset, sdict, rule, val):
|
def c_page_width(self, ruleset, sdict, rule, val):
|
||||||
""" Set width of box
|
''' Set width of box
|
||||||
HTML doesn't really have a page-width. It is always 100% of the browser width
|
HTML doesn't really have a page-width. It is always 100% of the browser width
|
||||||
"""
|
'''
|
||||||
sdict['width'] = val
|
sdict['width'] = val
|
||||||
|
|
||||||
def c_text_underline_style(self, ruleset, sdict, rule, val):
|
def c_text_underline_style(self, ruleset, sdict, rule, val):
|
||||||
""" Set underline decoration
|
''' Set underline decoration
|
||||||
HTML doesn't really have a page-width. It is always 100% of the browser width
|
HTML doesn't really have a page-width. It is always 100% of the browser width
|
||||||
"""
|
'''
|
||||||
if val and val != 'none':
|
if val and val != 'none':
|
||||||
sdict['text-decoration'] = 'underline'
|
sdict['text-decoration'] = 'underline'
|
||||||
|
|
||||||
def c_text_line_through_style(self, ruleset, sdict, rule, val):
|
def c_text_line_through_style(self, ruleset, sdict, rule, val):
|
||||||
""" Set underline decoration
|
''' Set underline decoration
|
||||||
HTML doesn't really have a page-width. It is always 100% of the browser width
|
HTML doesn't really have a page-width. It is always 100% of the browser width
|
||||||
"""
|
'''
|
||||||
if val and val != 'none':
|
if val and val != 'none':
|
||||||
sdict['text-decoration'] = 'line-through'
|
sdict['text-decoration'] = 'line-through'
|
||||||
|
|
||||||
@@ -302,9 +302,9 @@ class StyleToCSS:
|
|||||||
sdict['height'] = val
|
sdict['height'] = val
|
||||||
|
|
||||||
def convert_styles(self, ruleset):
|
def convert_styles(self, ruleset):
|
||||||
""" Rule is a tuple of (namespace, name). If the namespace is '' then
|
''' Rule is a tuple of (namespace, name). If the namespace is '' then
|
||||||
it is already CSS2
|
it is already CSS2
|
||||||
"""
|
'''
|
||||||
sdict = {}
|
sdict = {}
|
||||||
for rule,val in ruleset.items():
|
for rule,val in ruleset.items():
|
||||||
if rule[0] == '':
|
if rule[0] == '':
|
||||||
@@ -766,9 +766,9 @@ class ODF2XHTML(handler.ContentHandler):
|
|||||||
self.cs.fillimages[name] = imghref
|
self.cs.fillimages[name] = imghref
|
||||||
|
|
||||||
def rewritelink(self, imghref):
|
def rewritelink(self, imghref):
|
||||||
""" Intended to be overloaded if you don't store your pictures
|
''' Intended to be overloaded if you don't store your pictures
|
||||||
in a Pictures subfolder
|
in a Pictures subfolder
|
||||||
"""
|
'''
|
||||||
return imghref
|
return imghref
|
||||||
|
|
||||||
def s_draw_image(self, tag, attrs):
|
def s_draw_image(self, tag, attrs):
|
||||||
@@ -980,10 +980,10 @@ dl.notes dd:last-of-type { page-break-after: avoid }
|
|||||||
''' '''
|
''' '''
|
||||||
|
|
||||||
def s_office_presentation(self, tag, attrs):
|
def s_office_presentation(self, tag, attrs):
|
||||||
""" For some odd reason, OpenOffice Impress doesn't define a default-style
|
''' For some odd reason, OpenOffice Impress doesn't define a default-style
|
||||||
for the 'paragraph'. We therefore force a standard when we see
|
for the 'paragraph'. We therefore force a standard when we see
|
||||||
it is a presentation
|
it is a presentation
|
||||||
"""
|
'''
|
||||||
self.styledict['p'] = {(FONS,'font-size'): '24pt'}
|
self.styledict['p'] = {(FONS,'font-size'): '24pt'}
|
||||||
self.styledict['presentation'] = {(FONS,'font-size'): '24pt'}
|
self.styledict['presentation'] = {(FONS,'font-size'): '24pt'}
|
||||||
self.html_body(tag, attrs)
|
self.html_body(tag, attrs)
|
||||||
@@ -1039,12 +1039,12 @@ dl.notes dd:last-of-type { page-break-after: avoid }
|
|||||||
self.currentstyle = None
|
self.currentstyle = None
|
||||||
|
|
||||||
def s_style_font_face(self, tag, attrs):
|
def s_style_font_face(self, tag, attrs):
|
||||||
""" It is possible that the HTML browser doesn't know how to
|
''' It is possible that the HTML browser doesn't know how to
|
||||||
show a particular font. Luckily ODF provides generic fallbacks
|
show a particular font. Luckily ODF provides generic fallbacks
|
||||||
Unfortunately they are not the same as CSS2.
|
Unfortunately they are not the same as CSS2.
|
||||||
CSS2: serif, sans-serif, cursive, fantasy, monospace
|
CSS2: serif, sans-serif, cursive, fantasy, monospace
|
||||||
ODF: roman, swiss, modern, decorative, script, system
|
ODF: roman, swiss, modern, decorative, script, system
|
||||||
"""
|
'''
|
||||||
name = attrs[(STYLENS,'name')]
|
name = attrs[(STYLENS,'name')]
|
||||||
family = attrs[(SVGNS,'font-family')]
|
family = attrs[(SVGNS,'font-family')]
|
||||||
generic = attrs.get((STYLENS,'font-family-generic'),'')
|
generic = attrs.get((STYLENS,'font-family-generic'),'')
|
||||||
@@ -1086,10 +1086,10 @@ dl.notes dd:last-of-type { page-break-after: avoid }
|
|||||||
self.styledict[self.currentstyle] = {}
|
self.styledict[self.currentstyle] = {}
|
||||||
|
|
||||||
def s_style_page_layout(self, tag, attrs):
|
def s_style_page_layout(self, tag, attrs):
|
||||||
""" Collect the formatting for the page layout style.
|
''' Collect the formatting for the page layout style.
|
||||||
This won't work in CSS 2.1, as page identifiers are not allowed.
|
This won't work in CSS 2.1, as page identifiers are not allowed.
|
||||||
It is legal in CSS3, but the rest of the application doesn't specify when to use what page layout
|
It is legal in CSS3, but the rest of the application doesn't specify when to use what page layout
|
||||||
"""
|
'''
|
||||||
name = attrs[(STYLENS,'name')]
|
name = attrs[(STYLENS,'name')]
|
||||||
name = name.replace('.','_')
|
name = name.replace('.','_')
|
||||||
self.currentstyle = '.PL-' + name
|
self.currentstyle = '.PL-' + name
|
||||||
@@ -1319,10 +1319,10 @@ dl.notes dd:last-of-type { page-break-after: avoid }
|
|||||||
self.purgedata()
|
self.purgedata()
|
||||||
|
|
||||||
def s_text_list(self, tag, attrs):
|
def s_text_list(self, tag, attrs):
|
||||||
""" Start a list (<ul> or <ol>)
|
''' Start a list (<ul> or <ol>)
|
||||||
To know which level we're at, we have to count the number
|
To know which level we're at, we have to count the number
|
||||||
of <text:list> elements on the tagstack.
|
of <text:list> elements on the tagstack.
|
||||||
"""
|
'''
|
||||||
name = attrs.get((TEXTNS,'style-name'))
|
name = attrs.get((TEXTNS,'style-name'))
|
||||||
continue_numbering = attrs.get((TEXTNS, 'continue-numbering')) == 'true'
|
continue_numbering = attrs.get((TEXTNS, 'continue-numbering')) == 'true'
|
||||||
continue_list = attrs.get((TEXTNS, 'continue-list'))
|
continue_list = attrs.get((TEXTNS, 'continue-list'))
|
||||||
@@ -1391,10 +1391,10 @@ dl.notes dd:last-of-type { page-break-after: avoid }
|
|||||||
self.purgedata()
|
self.purgedata()
|
||||||
|
|
||||||
def s_text_list_level_style_bullet(self, tag, attrs):
|
def s_text_list_level_style_bullet(self, tag, attrs):
|
||||||
""" CSS doesn't have the ability to set the glyph
|
''' CSS doesn't have the ability to set the glyph
|
||||||
to a particular character, so we just go through
|
to a particular character, so we just go through
|
||||||
the available glyphs
|
the available glyphs
|
||||||
"""
|
'''
|
||||||
name = self.tagstack.rfindattr((STYLENS,'name'))
|
name = self.tagstack.rfindattr((STYLENS,'name'))
|
||||||
level = attrs[(TEXTNS,'level')]
|
level = attrs[(TEXTNS,'level')]
|
||||||
self.prevstyle = self.currentstyle
|
self.prevstyle = self.currentstyle
|
||||||
@@ -1649,10 +1649,10 @@ dl.notes dd:last-of-type { page-break-after: avoid }
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
def save(self, outputfile, addsuffix=False):
|
def save(self, outputfile, addsuffix=False):
|
||||||
""" Save the HTML under the filename.
|
''' Save the HTML under the filename.
|
||||||
If the filename is '-' then save to stdout
|
If the filename is '-' then save to stdout
|
||||||
We have the last style filename in self.stylefilename
|
We have the last style filename in self.stylefilename
|
||||||
"""
|
'''
|
||||||
if outputfile == '-':
|
if outputfile == '-':
|
||||||
import sys # Added by Kovid
|
import sys # Added by Kovid
|
||||||
outputfp = sys.stdout
|
outputfp = sys.stdout
|
||||||
|
@ -226,9 +226,9 @@ class OpenDocument:
|
|||||||
return xml.getvalue()
|
return xml.getvalue()
|
||||||
|
|
||||||
def __manifestxml(self):
|
def __manifestxml(self):
|
||||||
""" Generates the manifest.xml file
|
''' Generates the manifest.xml file
|
||||||
The self.manifest isn't available unless the document is being saved
|
The self.manifest isn't available unless the document is being saved
|
||||||
"""
|
'''
|
||||||
xml=PolyglotBytesIO()
|
xml=PolyglotBytesIO()
|
||||||
xml.write(_XMLPROLOGUE)
|
xml.write(_XMLPROLOGUE)
|
||||||
self.manifest.toXml(0,xml)
|
self.manifest.toXml(0,xml)
|
||||||
@@ -313,11 +313,11 @@ class OpenDocument:
|
|||||||
return xml.getvalue()
|
return xml.getvalue()
|
||||||
|
|
||||||
def addPicture(self, filename, mediatype=None, content=None):
|
def addPicture(self, filename, mediatype=None, content=None):
|
||||||
""" Add a picture
|
''' Add a picture
|
||||||
It uses the same convention as OOo, in that it saves the picture in
|
It uses the same convention as OOo, in that it saves the picture in
|
||||||
the zipfile in the subdirectory 'Pictures'
|
the zipfile in the subdirectory 'Pictures'
|
||||||
If passed a file ptr, mediatype must be set
|
If passed a file ptr, mediatype must be set
|
||||||
"""
|
'''
|
||||||
if content is None:
|
if content is None:
|
||||||
if mediatype is None:
|
if mediatype is None:
|
||||||
mediatype, encoding = mimetypes.guess_type(filename)
|
mediatype, encoding = mimetypes.guess_type(filename)
|
||||||
@@ -337,12 +337,12 @@ class OpenDocument:
|
|||||||
return manifestfn
|
return manifestfn
|
||||||
|
|
||||||
def addPictureFromFile(self, filename, mediatype=None):
|
def addPictureFromFile(self, filename, mediatype=None):
|
||||||
""" Add a picture
|
''' Add a picture
|
||||||
It uses the same convention as OOo, in that it saves the picture in
|
It uses the same convention as OOo, in that it saves the picture in
|
||||||
the zipfile in the subdirectory 'Pictures'.
|
the zipfile in the subdirectory 'Pictures'.
|
||||||
If mediatype is not given, it will be guessed from the filename
|
If mediatype is not given, it will be guessed from the filename
|
||||||
extension.
|
extension.
|
||||||
"""
|
'''
|
||||||
if mediatype is None:
|
if mediatype is None:
|
||||||
mediatype, encoding = mimetypes.guess_type(filename)
|
mediatype, encoding = mimetypes.guess_type(filename)
|
||||||
if mediatype is None:
|
if mediatype is None:
|
||||||
@@ -358,12 +358,12 @@ class OpenDocument:
|
|||||||
return manifestfn
|
return manifestfn
|
||||||
|
|
||||||
def addPictureFromString(self, content, mediatype):
|
def addPictureFromString(self, content, mediatype):
|
||||||
""" Add a picture
|
''' Add a picture
|
||||||
It uses the same convention as OOo, in that it saves the picture in
|
It uses the same convention as OOo, in that it saves the picture in
|
||||||
the zipfile in the subdirectory 'Pictures'. The content variable
|
the zipfile in the subdirectory 'Pictures'. The content variable
|
||||||
is a string that contains the binary image data. The mediatype
|
is a string that contains the binary image data. The mediatype
|
||||||
indicates the image format.
|
indicates the image format.
|
||||||
"""
|
'''
|
||||||
ext = mimetypes.guess_extension(mediatype)
|
ext = mimetypes.guess_extension(mediatype)
|
||||||
manifestfn = f'Pictures/{(time.time()*10000000000):0.0f}{ext}'
|
manifestfn = f'Pictures/{(time.time()*10000000000):0.0f}{ext}'
|
||||||
self.Pictures[manifestfn] = (IS_IMAGE, content, mediatype)
|
self.Pictures[manifestfn] = (IS_IMAGE, content, mediatype)
|
||||||
@@ -420,9 +420,9 @@ class OpenDocument:
|
|||||||
self.meta.addElement(meta.Generator(text=TOOLSVERSION))
|
self.meta.addElement(meta.Generator(text=TOOLSVERSION))
|
||||||
|
|
||||||
def save(self, outputfile, addsuffix=False):
|
def save(self, outputfile, addsuffix=False):
|
||||||
""" Save the document under the filename.
|
''' Save the document under the filename.
|
||||||
If the filename is '-' then save to stdout
|
If the filename is '-' then save to stdout
|
||||||
"""
|
'''
|
||||||
if outputfile == '-':
|
if outputfile == '-':
|
||||||
outputfp = zipfile.ZipFile(sys.stdout,'w')
|
outputfp = zipfile.ZipFile(sys.stdout,'w')
|
||||||
else:
|
else:
|
||||||
@@ -529,9 +529,9 @@ class OpenDocument:
|
|||||||
|
|
||||||
# Document's DOM methods
|
# Document's DOM methods
|
||||||
def createElement(self, element):
|
def createElement(self, element):
|
||||||
""" Inconvenient interface to create an element, but follows XML-DOM.
|
''' Inconvenient interface to create an element, but follows XML-DOM.
|
||||||
Does not allow attributes as argument, therefore can't check grammar.
|
Does not allow attributes as argument, therefore can't check grammar.
|
||||||
"""
|
'''
|
||||||
return element(check_grammar=False)
|
return element(check_grammar=False)
|
||||||
|
|
||||||
def createTextNode(self, data):
|
def createTextNode(self, data):
|
||||||
|
@@ -17,14 +17,14 @@
|
|||||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
|
||||||
"""
|
'''
|
||||||
Class for handling whitespace properly in OpenDocument.
|
Class for handling whitespace properly in OpenDocument.
|
||||||
|
|
||||||
While it is possible to use getTextContent() and setTextContent()
|
While it is possible to use getTextContent() and setTextContent()
|
||||||
to extract or create ODF content, these won't extract or create
|
to extract or create ODF content, these won't extract or create
|
||||||
the appropriate <text:s>, <text:tab>, or <text:line-break>
|
the appropriate <text:s>, <text:tab>, or <text:line-break>
|
||||||
elements. This module takes care of that problem.
|
elements. This module takes care of that problem.
|
||||||
"""
|
'''
|
||||||
|
|
||||||
|
|
||||||
from .element import Node
|
from .element import Node
|
||||||
@@ -38,12 +38,12 @@ class WhitespaceText:
|
|||||||
self.spaceCount = 0
|
self.spaceCount = 0
|
||||||
|
|
||||||
def addTextToElement(self, odfElement, s):
|
def addTextToElement(self, odfElement, s):
|
||||||
""" Process an input string, inserting
|
''' Process an input string, inserting
|
||||||
<text:tab> elements for '\t',
|
<text:tab> elements for '\t',
|
||||||
<text:line-break> elements for '\n', and
|
<text:line-break> elements for '\n', and
|
||||||
<text:s> elements for runs of more than one blank.
|
<text:s> elements for runs of more than one blank.
|
||||||
These will be added to the given element.
|
These will be added to the given element.
|
||||||
"""
|
'''
|
||||||
i = 0
|
i = 0
|
||||||
ch = ' '
|
ch = ' '
|
||||||
|
|
||||||
|
@@ -85,6 +85,6 @@ with open(f'{base}/__init__.py', 'w') as f:
|
|||||||
def __getattr__(name):
|
def __getattr__(name):
|
||||||
if name in top_level_module_names:
|
if name in top_level_module_names:
|
||||||
import importlib
|
import importlib
|
||||||
return importlib.import_module("{QT_WRAPPER}." + name)
|
return importlib.import_module('{QT_WRAPPER}.' + name)
|
||||||
raise AttributeError(name)
|
raise AttributeError(name)
|
||||||
''', end='', file=f)
|
''', end='', file=f)
|
||||||
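Every hunk above makes the same mechanical change: a string literal delimited with """ is re-delimited with ''' while its contents stay byte-for-byte identical. The diff does not show the tooling that produced it; purely as an illustrative sketch (the function name and approach below are hypothetical, not what was actually run for this change), a transformation of this shape can be scripted with Python's tokenize module so that only string delimiters are touched and the rest of each file round-trips unchanged:

import io
import tokenize

def uniform_triple_quotes(source):
    # Hypothetical helper, not the tool used for this change: rewrite
    # """-delimited string literals as '''-delimited ones, skipping any
    # string whose body would clash with the new delimiter.
    out = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.STRING:
            text = tok.string
            n = len(text) - len(text.lstrip('rRbBuUfF'))  # skip prefixes such as r, b, u, f
            prefix, body = text[:n], text[n:]
            if body.startswith('"""') and body.endswith('"""') and len(body) >= 6:
                inner = body[3:-3]
                if "'''" not in inner and not inner.endswith("'"):
                    # The replacement has the same length, so token positions stay valid.
                    tok = tok._replace(string=prefix + "'''" + inner + "'''")
        out.append(tok)
    return tokenize.untokenize(out)

Run over a file's text and written back, the returned source differs only in the quote style of triple-quoted strings whose bodies do not themselves conflict with the new delimiter, which is exactly the kind of replacement recorded in the hunks above.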