Mirror of https://github.com/kovidgoyal/calibre.git
commit b789e4d81b
parent 1a928fc497

    Use the modern form for set literals
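Note: the change below is mechanical. Wherever a set was built by passing a list to the set() constructor, the code now uses the set-literal syntax available since Python 2.7. A minimal sketch of the equivalence (illustrative only; the values here are made up, not taken from the diff):

    # Old spelling: build a throwaway list, then hand it to the set() constructor
    file_types = set(['epub', 'mobi'])

    # Modern spelling: a set literal, no intermediate list
    file_types = {'epub', 'mobi'}

    assert {'epub', 'mobi'} == set(['epub', 'mobi'])

    # Caveat: {} is an empty dict, so an empty set must still be written as set()
    empty = set()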
@@ -277,8 +277,8 @@ class Plugin(object): # {{{
         if self.plugin_path is not None:
             from calibre.utils.zipfile import ZipFile
             zf = ZipFile(self.plugin_path)
-            extensions = set([x.rpartition('.')[-1].lower() for x in
-                zf.namelist()])
+            extensions = {x.rpartition('.')[-1].lower() for x in
+                zf.namelist()}
             zip_safe = True
             for ext in ('pyd', 'so', 'dll', 'dylib'):
                 if ext in extensions:
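Note: the hunk above converts a list comprehension wrapped in set() into a set comprehension; both forms build the same set. A small illustrative sketch (the file names are invented):

    names = ['plugin.py', 'icon.PNG', 'lib.so']

    # Old spelling: list comprehension wrapped in set()
    extensions = set([n.rpartition('.')[-1].lower() for n in names])

    # Modern spelling: set comprehension, no intermediate list
    extensions = {n.rpartition('.')[-1].lower() for n in names}

    assert extensions == {'py', 'png', 'so'}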
@@ -507,11 +507,10 @@ class CatalogPlugin(Plugin): # {{{

     def get_output_fields(self, db, opts):
         # Return a list of requested fields
-        all_std_fields = set(
-            ['author_sort','authors','comments','cover','formats',
+        all_std_fields = {'author_sort','authors','comments','cover','formats',
             'id','isbn','library_name','ondevice','pubdate','publisher',
             'rating','series_index','series','size','tags','timestamp',
-            'title_sort','title','uuid','languages','identifiers'])
+            'title_sort','title','uuid','languages','identifiers'}
         all_custom_fields = set(db.custom_field_keys())
         for field in list(all_custom_fields):
             fm = db.field_metadata[field]
@@ -24,7 +24,7 @@ class PML2PMLZ(FileTypePlugin):
             'This plugin is run every time you add '
             'a PML file to the library.')
     version = numeric_version
-    file_types = set(['pml'])
+    file_types = {'pml'}
     supported_platforms = ['windows', 'osx', 'linux']
     on_import = True

@@ -54,7 +54,7 @@ class TXT2TXTZ(FileTypePlugin):
             'containing Markdown or Textile references to images. The referenced '
             'images as well as the TXT file are added to the archive.')
     version = numeric_version
-    file_types = set(['txt', 'text'])
+    file_types = {'txt', 'text'}
     supported_platforms = ['windows', 'osx', 'linux']
     on_import = True

@@ -132,7 +132,7 @@ plugins += [HTML2ZIP, PML2PMLZ, TXT2TXTZ, ArchiveExtract,]
 class ComicMetadataReader(MetadataReaderPlugin):

     name = 'Read comic metadata'
-    file_types = set(['cbr', 'cbz'])
+    file_types = {'cbr', 'cbz'}
     description = _('Extract cover from comic files')

     def customization_help(self, gui=False):
@@ -173,7 +173,7 @@ class ComicMetadataReader(MetadataReaderPlugin):
 class CHMMetadataReader(MetadataReaderPlugin):

     name = 'Read CHM metadata'
-    file_types = set(['chm'])
+    file_types = {'chm'}
     description = _('Read metadata from %s files') % 'CHM'

     def get_metadata(self, stream, ftype):
@@ -184,7 +184,7 @@ class CHMMetadataReader(MetadataReaderPlugin):
 class EPUBMetadataReader(MetadataReaderPlugin):

     name = 'Read EPUB metadata'
-    file_types = set(['epub'])
+    file_types = {'epub'}
     description = _('Read metadata from %s files')%'EPUB'

     def get_metadata(self, stream, ftype):
@@ -208,7 +208,7 @@ class FB2MetadataReader(MetadataReaderPlugin):
 class HTMLMetadataReader(MetadataReaderPlugin):

     name = 'Read HTML metadata'
-    file_types = set(['html'])
+    file_types = {'html'}
     description = _('Read metadata from %s files')%'HTML'

     def get_metadata(self, stream, ftype):
@@ -219,7 +219,7 @@ class HTMLMetadataReader(MetadataReaderPlugin):
 class HTMLZMetadataReader(MetadataReaderPlugin):

     name = 'Read HTMLZ metadata'
-    file_types = set(['htmlz'])
+    file_types = {'htmlz'}
     description = _('Read metadata from %s files') % 'HTMLZ'
     author = 'John Schember'

@@ -231,7 +231,7 @@ class HTMLZMetadataReader(MetadataReaderPlugin):
 class IMPMetadataReader(MetadataReaderPlugin):

     name = 'Read IMP metadata'
-    file_types = set(['imp'])
+    file_types = {'imp'}
     description = _('Read metadata from %s files')%'IMP'
     author = 'Ashish Kulkarni'

@@ -243,7 +243,7 @@ class IMPMetadataReader(MetadataReaderPlugin):
 class LITMetadataReader(MetadataReaderPlugin):

     name = 'Read LIT metadata'
-    file_types = set(['lit'])
+    file_types = {'lit'}
     description = _('Read metadata from %s files')%'LIT'

     def get_metadata(self, stream, ftype):
@@ -254,7 +254,7 @@ class LITMetadataReader(MetadataReaderPlugin):
 class LRFMetadataReader(MetadataReaderPlugin):

     name = 'Read LRF metadata'
-    file_types = set(['lrf'])
+    file_types = {'lrf'}
     description = _('Read metadata from %s files')%'LRF'

     def get_metadata(self, stream, ftype):
@@ -265,7 +265,7 @@ class LRFMetadataReader(MetadataReaderPlugin):
 class LRXMetadataReader(MetadataReaderPlugin):

     name = 'Read LRX metadata'
-    file_types = set(['lrx'])
+    file_types = {'lrx'}
     description = _('Read metadata from %s files')%'LRX'

     def get_metadata(self, stream, ftype):
@@ -276,7 +276,7 @@ class LRXMetadataReader(MetadataReaderPlugin):
 class MOBIMetadataReader(MetadataReaderPlugin):

     name = 'Read MOBI metadata'
-    file_types = set(['mobi', 'prc', 'azw', 'azw3', 'azw4', 'pobi'])
+    file_types = {'mobi', 'prc', 'azw', 'azw3', 'azw4', 'pobi'}
     description = _('Read metadata from %s files')%'MOBI'

     def get_metadata(self, stream, ftype):
@@ -287,7 +287,7 @@ class MOBIMetadataReader(MetadataReaderPlugin):
 class ODTMetadataReader(MetadataReaderPlugin):

     name = 'Read ODT metadata'
-    file_types = set(['odt'])
+    file_types = {'odt'}
     description = _('Read metadata from %s files')%'ODT'

     def get_metadata(self, stream, ftype):
@@ -298,7 +298,7 @@ class ODTMetadataReader(MetadataReaderPlugin):
 class DocXMetadataReader(MetadataReaderPlugin):

     name = 'Read DOCX metadata'
-    file_types = set(['docx'])
+    file_types = {'docx'}
     description = _('Read metadata from %s files')%'DOCX'

     def get_metadata(self, stream, ftype):
@@ -320,7 +320,7 @@ class OPFMetadataReader(MetadataReaderPlugin):
 class PDBMetadataReader(MetadataReaderPlugin):

     name = 'Read PDB metadata'
-    file_types = set(['pdb', 'updb'])
+    file_types = {'pdb', 'updb'}
     description = _('Read metadata from %s files') % 'PDB'
     author = 'John Schember'

@@ -332,7 +332,7 @@ class PDBMetadataReader(MetadataReaderPlugin):
 class PDFMetadataReader(MetadataReaderPlugin):

     name = 'Read PDF metadata'
-    file_types = set(['pdf'])
+    file_types = {'pdf'}
     description = _('Read metadata from %s files')%'PDF'

     def get_metadata(self, stream, ftype):
@@ -345,7 +345,7 @@ class PDFMetadataReader(MetadataReaderPlugin):
 class PMLMetadataReader(MetadataReaderPlugin):

     name = 'Read PML metadata'
-    file_types = set(['pml', 'pmlz'])
+    file_types = {'pml', 'pmlz'}
     description = _('Read metadata from %s files') % 'PML'
     author = 'John Schember'

@@ -357,7 +357,7 @@ class PMLMetadataReader(MetadataReaderPlugin):
 class RARMetadataReader(MetadataReaderPlugin):

     name = 'Read RAR metadata'
-    file_types = set(['rar'])
+    file_types = {'rar'}
     description = _('Read metadata from e-books in RAR archives')

     def get_metadata(self, stream, ftype):
@@ -368,7 +368,7 @@ class RARMetadataReader(MetadataReaderPlugin):
 class RBMetadataReader(MetadataReaderPlugin):

     name = 'Read RB metadata'
-    file_types = set(['rb'])
+    file_types = {'rb'}
     description = _('Read metadata from %s files')%'RB'
     author = 'Ashish Kulkarni'

@@ -380,7 +380,7 @@ class RBMetadataReader(MetadataReaderPlugin):
 class RTFMetadataReader(MetadataReaderPlugin):

     name = 'Read RTF metadata'
-    file_types = set(['rtf'])
+    file_types = {'rtf'}
     description = _('Read metadata from %s files')%'RTF'

     def get_metadata(self, stream, ftype):
@@ -391,7 +391,7 @@ class RTFMetadataReader(MetadataReaderPlugin):
 class SNBMetadataReader(MetadataReaderPlugin):

     name = 'Read SNB metadata'
-    file_types = set(['snb'])
+    file_types = {'snb'}
     description = _('Read metadata from %s files') % 'SNB'
     author = 'Li Fanxi'

@@ -403,7 +403,7 @@ class SNBMetadataReader(MetadataReaderPlugin):
 class TOPAZMetadataReader(MetadataReaderPlugin):

     name = 'Read Topaz metadata'
-    file_types = set(['tpz', 'azw1'])
+    file_types = {'tpz', 'azw1'}
     description = _('Read metadata from %s files')%'MOBI'

     def get_metadata(self, stream, ftype):
@@ -414,7 +414,7 @@ class TOPAZMetadataReader(MetadataReaderPlugin):
 class TXTMetadataReader(MetadataReaderPlugin):

     name = 'Read TXT metadata'
-    file_types = set(['txt'])
+    file_types = {'txt'}
     description = _('Read metadata from %s files') % 'TXT'
     author = 'John Schember'

@@ -426,7 +426,7 @@ class TXTMetadataReader(MetadataReaderPlugin):
 class TXTZMetadataReader(MetadataReaderPlugin):

     name = 'Read TXTZ metadata'
-    file_types = set(['txtz'])
+    file_types = {'txtz'}
     description = _('Read metadata from %s files') % 'TXTZ'
     author = 'John Schember'

@@ -438,7 +438,7 @@ class TXTZMetadataReader(MetadataReaderPlugin):
 class ZipMetadataReader(MetadataReaderPlugin):

     name = 'Read ZIP metadata'
-    file_types = set(['zip', 'oebzip'])
+    file_types = {'zip', 'oebzip'}
     description = _('Read metadata from e-books in ZIP archives')

     def get_metadata(self, stream, ftype):
@@ -457,7 +457,7 @@ plugins += [x for x in list(locals().values()) if isinstance(x, type) and
 class EPUBMetadataWriter(MetadataWriterPlugin):

     name = 'Set EPUB metadata'
-    file_types = set(['epub'])
+    file_types = {'epub'}
     description = _('Set metadata in %s files')%'EPUB'

     def set_metadata(self, stream, mi, type):
@@ -487,7 +487,7 @@ class FB2MetadataWriter(MetadataWriterPlugin):
 class HTMLZMetadataWriter(MetadataWriterPlugin):

     name = 'Set HTMLZ metadata'
-    file_types = set(['htmlz'])
+    file_types = {'htmlz'}
     description = _('Set metadata from %s files') % 'HTMLZ'
     author = 'John Schember'

@@ -499,7 +499,7 @@ class HTMLZMetadataWriter(MetadataWriterPlugin):
 class LRFMetadataWriter(MetadataWriterPlugin):

     name = 'Set LRF metadata'
-    file_types = set(['lrf'])
+    file_types = {'lrf'}
     description = _('Set metadata in %s files')%'LRF'

     def set_metadata(self, stream, mi, type):
@@ -510,7 +510,7 @@ class LRFMetadataWriter(MetadataWriterPlugin):
 class MOBIMetadataWriter(MetadataWriterPlugin):

     name = 'Set MOBI metadata'
-    file_types = set(['mobi', 'prc', 'azw', 'azw3', 'azw4'])
+    file_types = {'mobi', 'prc', 'azw', 'azw3', 'azw4'}
     description = _('Set metadata in %s files')%'MOBI'
     author = 'Marshall T. Vandegrift'

@@ -522,7 +522,7 @@ class MOBIMetadataWriter(MetadataWriterPlugin):
 class PDBMetadataWriter(MetadataWriterPlugin):

     name = 'Set PDB metadata'
-    file_types = set(['pdb'])
+    file_types = {'pdb'}
     description = _('Set metadata from %s files') % 'PDB'
     author = 'John Schember'

@@ -534,7 +534,7 @@ class PDBMetadataWriter(MetadataWriterPlugin):
 class PDFMetadataWriter(MetadataWriterPlugin):

     name = 'Set PDF metadata'
-    file_types = set(['pdf'])
+    file_types = {'pdf'}
     description = _('Set metadata in %s files') % 'PDF'
     author = 'Kovid Goyal'

@@ -546,7 +546,7 @@ class PDFMetadataWriter(MetadataWriterPlugin):
 class RTFMetadataWriter(MetadataWriterPlugin):

     name = 'Set RTF metadata'
-    file_types = set(['rtf'])
+    file_types = {'rtf'}
     description = _('Set metadata in %s files')%'RTF'

     def set_metadata(self, stream, mi, type):
@@ -557,7 +557,7 @@ class RTFMetadataWriter(MetadataWriterPlugin):
 class TOPAZMetadataWriter(MetadataWriterPlugin):

     name = 'Set TOPAZ metadata'
-    file_types = set(['tpz', 'azw1'])
+    file_types = {'tpz', 'azw1'}
     description = _('Set metadata in %s files')%'TOPAZ'
     author = 'Greg Riker'

@@ -569,7 +569,7 @@ class TOPAZMetadataWriter(MetadataWriterPlugin):
 class TXTZMetadataWriter(MetadataWriterPlugin):

     name = 'Set TXTZ metadata'
-    file_types = set(['txtz'])
+    file_types = {'txtz'}
     description = _('Set metadata from %s files') % 'TXTZ'
     author = 'John Schember'

@@ -581,7 +581,7 @@ class TXTZMetadataWriter(MetadataWriterPlugin):
 class DocXMetadataWriter(MetadataWriterPlugin):

     name = 'Set DOCX metadata'
-    file_types = set(['docx'])
+    file_types = {'docx'}
     description = _('Read metadata from %s files')%'DOCX'

     def set_metadata(self, stream, mi, type):
@@ -166,7 +166,7 @@ class InputFormatPlugin(Plugin):
     #: Options shared by all Input format plugins. Do not override
     #: in sub-classes. Use :attr:`options` instead. Every option must be an
     #: instance of :class:`OptionRecommendation`.
-    common_options = set([
+    common_options = {
         OptionRecommendation(name='input_encoding',
             recommended_value=None, level=OptionRecommendation.LOW,
             help=_('Specify the character encoding of the input document. If '
@@ -174,9 +174,7 @@ class InputFormatPlugin(Plugin):
                 'document itself. Particularly useful for documents that '
                 'do not declare an encoding or that have erroneous '
                 'encoding declarations.')
-            ),
-
-        ])
+        )}

     #: Options to customize the behavior of this plugin. Every option must be an
     #: instance of :class:`OptionRecommendation`.
@@ -297,14 +295,13 @@ class OutputFormatPlugin(Plugin):
     #: Options shared by all Input format plugins. Do not override
     #: in sub-classes. Use :attr:`options` instead. Every option must be an
     #: instance of :class:`OptionRecommendation`.
-    common_options = set([
+    common_options = {
         OptionRecommendation(name='pretty_print',
             recommended_value=False, level=OptionRecommendation.LOW,
             help=_('If specified, the output plugin will try to create output '
                 'that is as human readable as possible. May not have any effect '
                 'for some output plugins.')
-            ),
-        ])
+        )}

     #: Options to customize the behavior of this plugin. Every option must be an
     #: instance of :class:`OptionRecommendation`.
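Note: in the multi-line common_options and options collections above, only the delimiters change: the opening set([ becomes { and the trailing ]) becomes } (often fused with the last element's closing parenthesis as )} ). A hedged sketch of the same pattern with a stand-in class, since OptionRecommendation itself is not reproduced here:

    class Opt(object):
        # Stand-in for an option object; not calibre's OptionRecommendation
        def __init__(self, name, recommended_value):
            self.name = name
            self.recommended_value = recommended_value

    # Old spelling:
    #     common_options = set([
    #         Opt('input_encoding', None),
    #         Opt('pretty_print', False),
    #         ])

    # Modern spelling: the braces themselves delimit the set
    common_options = {
        Opt('input_encoding', None),
        Opt('pretty_print', False),
    }

    assert len(common_options) == 2  # plain objects hash by identity, so both remain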
@@ -44,7 +44,7 @@ class Plugin(_Plugin):
 class InputProfile(Plugin):

     author = 'Kovid Goyal'
-    supported_platforms = set(['windows', 'osx', 'linux'])
+    supported_platforms = {'windows', 'osx', 'linux'}
     can_be_disabled = False
     type = _('Input profile')

@@ -242,7 +242,7 @@ input_profiles.sort(cmp=lambda x,y:cmp(x.name.lower(), y.name.lower()))
 class OutputProfile(Plugin):

     author = 'Kovid Goyal'
-    supported_platforms = set(['windows', 'osx', 'linux'])
+    supported_platforms = {'windows', 'osx', 'linux'}
     can_be_disabled = False
     type = _('Output profile')

@@ -102,9 +102,9 @@ def restore_plugin_state_to_default(plugin_or_name):
     config['enabled_plugins'] = ep


-default_disabled_plugins = set([
+default_disabled_plugins = {
     'Overdrive', 'Douban Books', 'OZON.ru', 'Edelweiss', 'Google Images', 'Big Book Search',
-])
+}


 def is_disabled(plugin):
@@ -1215,9 +1215,9 @@ class DB(object):

     @property
     def custom_tables(self):
-        return set([x[0] for x in self.conn.get(
+        return {x[0] for x in self.conn.get(
             'SELECT name FROM sqlite_master WHERE type="table" AND '
-            '(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')])
+            '(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')}

     @classmethod
     def exists_at(cls, path):
@@ -36,20 +36,20 @@ class FilesystemTest(BaseTest):
         ae, af, sf = self.assertEqual, self.assertFalse, cache.set_field

         # Test that changing metadata on a book with no formats/cover works
-        ae(sf('title', {3:'moved1'}), set([3]))
-        ae(sf('authors', {3:'moved1'}), set([3]))
-        ae(sf('title', {3:'Moved1'}), set([3]))
-        ae(sf('authors', {3:'Moved1'}), set([3]))
+        ae(sf('title', {3:'moved1'}), {3})
+        ae(sf('authors', {3:'moved1'}), {3})
+        ae(sf('title', {3:'Moved1'}), {3})
+        ae(sf('authors', {3:'Moved1'}), {3})
         ae(cache.field_for('title', 3), 'Moved1')
         ae(cache.field_for('authors', 3), ('Moved1',))

         # Now try with a book that has covers and formats
         orig_data = self.get_filesystem_data(cache, 1)
         orig_fpath = cache.format_abspath(1, 'FMT1')
-        ae(sf('title', {1:'moved'}), set([1]))
-        ae(sf('authors', {1:'moved'}), set([1]))
-        ae(sf('title', {1:'Moved'}), set([1]))
-        ae(sf('authors', {1:'Moved'}), set([1]))
+        ae(sf('title', {1:'moved'}), {1})
+        ae(sf('authors', {1:'moved'}), {1})
+        ae(sf('title', {1:'Moved'}), {1})
+        ae(sf('authors', {1:'Moved'}), {1})
         ae(cache.field_for('title', 1), 'Moved')
         ae(cache.field_for('authors', 1), ('Moved',))
         cache2 = self.init_cache(cl)
@@ -146,7 +146,7 @@ class LegacyTest(BaseTest):
         # Ensure that the following change will actually update the timestamp
         # on filesystems with one second resolution (OS X)
         time.sleep(1)
-        self.assertEqual(db2.data.cache.set_field('title', {1:'xxx'}), set([1]))
+        self.assertEqual(db2.data.cache.set_field('title', {1:'xxx'}), {1})
         db2.close()
         del db2
         self.assertNotEqual(db.title(1, index_is_id=True), 'xxx')
@@ -152,8 +152,8 @@ class WritingTest(BaseTest):
         del cache2
         self.assertEqual(cache.set_field('publisher', {1:'one', 2:'two',
             3:'three'}), {1, 2, 3})
-        self.assertEqual(cache.set_field('publisher', {1:''}), set([1]))
-        self.assertEqual(cache.set_field('publisher', {1:'two'}), set([1]))
+        self.assertEqual(cache.set_field('publisher', {1:''}), {1})
+        self.assertEqual(cache.set_field('publisher', {1:'two'}), {1})
         self.assertEqual(tuple(map(f.for_book, (1,2,3))), ('two', 'two', 'three'))
         self.assertEqual(cache.set_field('publisher', {1:'Two'}), {1, 2})
         cache2 = self.init_cache(cl)
@@ -163,7 +163,7 @@ class WritingTest(BaseTest):
         # Enum
         self.assertFalse(cache.set_field('#enum', {1:'Not allowed'}))
         self.assertEqual(cache.set_field('#enum', {1:'One', 2:'One', 3:'Three'}), {1, 3})
-        self.assertEqual(cache.set_field('#enum', {1:None}), set([1]))
+        self.assertEqual(cache.set_field('#enum', {1:None}), {1})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
             for i, val in {1:None, 2:'One', 3:'Three'}.iteritems():
@@ -185,10 +185,10 @@ class WritingTest(BaseTest):
         # Series
         self.assertFalse(cache.set_field('series',
             {1:'a series one', 2:'a series one'}, allow_case_change=False))
-        self.assertEqual(cache.set_field('series', {3:'Series [3]'}), set([3]))
+        self.assertEqual(cache.set_field('series', {3:'Series [3]'}), {3})
         self.assertEqual(cache.set_field('#series', {1:'Series', 3:'Series'}),
             {1, 3})
-        self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), set([2]))
+        self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), {2})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
             for i, val in {1:'A Series One', 2:'A Series One', 3:'Series'}.iteritems():
@@ -219,7 +219,7 @@ class WritingTest(BaseTest):
         ae(sf(name, {1:'tag one, News'}), {1, 2})
         ae(sf(name, {3:('tag two', 'sep,sep2')}), {2, 3})
         ae(len(f.table.id_map), 4)
-        ae(sf(name, {1:None}), set([1]))
+        ae(sf(name, {1:None}), {1})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
             ae(c.field_for(name, 3), ('tag two', 'sep;sep2'))
@@ -237,7 +237,7 @@ class WritingTest(BaseTest):
         f = cache.fields[name]
         ae(len(f.table.id_map), 3)
         af(cache.set_field(name, {3:None if name == 'authors' else 'Unknown'}))
-        ae(cache.set_field(name, {3:'Kovid Goyal & Divok Layog'}), set([3]))
+        ae(cache.set_field(name, {3:'Kovid Goyal & Divok Layog'}), {3})
         ae(cache.set_field(name, {1:'', 2:'An, Author'}), {1,2})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
@@ -257,18 +257,18 @@ class WritingTest(BaseTest):
         # Languages
         f = cache.fields['languages']
         ae(f.table.id_map, {1: 'eng', 2: 'deu'})
-        ae(sf('languages', {1:''}), set([1]))
+        ae(sf('languages', {1:''}), {1})
         ae(cache.field_for('languages', 1), ())
-        ae(sf('languages', {2:('und',)}), set([2]))
+        ae(sf('languages', {2:('und',)}), {2})
         af(f.table.id_map)
         ae(sf('languages', {1:'eng,fra,deu', 2:'es,Dutch', 3:'English'}), {1, 2, 3})
         ae(cache.field_for('languages', 1), ('eng', 'fra', 'deu'))
         ae(cache.field_for('languages', 2), ('spa', 'nld'))
         ae(cache.field_for('languages', 3), ('eng',))
-        ae(sf('languages', {3:None}), set([3]))
+        ae(sf('languages', {3:None}), {3})
         ae(cache.field_for('languages', 3), ())
-        ae(sf('languages', {1:'deu,fra,eng'}), set([1]), 'Changing order failed')
-        ae(sf('languages', {2:'deu,eng,eng'}), set([2]))
+        ae(sf('languages', {1:'deu,fra,eng'}), {1}, 'Changing order failed')
+        ae(sf('languages', {2:'deu,eng,eng'}), {2})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
             ae(cache.field_for('languages', 1), ('deu', 'fra', 'eng'))
@@ -277,9 +277,9 @@ class WritingTest(BaseTest):

         # Identifiers
         f = cache.fields['identifiers']
-        ae(sf('identifiers', {3: 'one:1,two:2'}), set([3]))
-        ae(sf('identifiers', {2:None}), set([2]))
-        ae(sf('identifiers', {1: {'test':'1', 'two':'2'}}), set([1]))
+        ae(sf('identifiers', {3: 'one:1,two:2'}), {3})
+        ae(sf('identifiers', {2:None}), {2})
+        ae(sf('identifiers', {1: {'test':'1', 'two':'2'}}), {1})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
             ae(c.field_for('identifiers', 3), {'one':'1', 'two':'2'})
@@ -323,7 +323,7 @@ class WritingTest(BaseTest):
         onowf = c.nowf
         c.nowf = lambda: utime
         try:
-            ae(sf('title', {3:'xxx'}), set([3]))
+            ae(sf('title', {3:'xxx'}), {3})
             self.assertTrue(3 in cache.dirtied_cache)
             ae(cache.field_for('last_modified', 3), utime)
             cache.dump_metadata()
@@ -374,7 +374,7 @@ class WritingTest(BaseTest):

         # Test removing a cover
         ae(cache.field_for('cover', 1), 1)
-        ae(cache.set_cover({1:None}), set([1]))
+        ae(cache.set_cover({1:None}), {1})
         ae(cache.field_for('cover', 1), 0)
         img = IMG

@@ -645,7 +645,7 @@ class WritingTest(BaseTest):
         uv = int(cache.backend.user_version)
         all_ids = cache.all_book_ids()
         cache.dump_and_restore()
-        self.assertEqual(cache.set_field('title', {1:'nt'}), set([1]), 'database connection broken')
+        self.assertEqual(cache.set_field('title', {1:'nt'}), {1}, 'database connection broken')
         cache = self.init_cache()
         self.assertEqual(cache.all_book_ids(), all_ids, 'dump and restore broke database')
         self.assertEqual(int(cache.backend.user_version), uv)
@@ -271,7 +271,7 @@ class KINDLE(USBMS):
         from calibre.ebooks.metadata import MetaInformation

         bm = annotation
-        ignore_tags = set(['Catalog', 'Clippings'])
+        ignore_tags = {'Catalog', 'Clippings'}

         if bm.type == 'kindle_bookmark':
             mi = db.get_metadata(db_id, index_is_id=True)
@@ -1280,7 +1280,7 @@ class KOBO(USBMS):
     def add_annotation_to_library(self, db, db_id, annotation):
         from calibre.ebooks.BeautifulSoup import Tag
         bm = annotation
-        ignore_tags = set(['Catalog', 'Clippings'])
+        ignore_tags = {'Catalog', 'Clippings'}

         if bm.type == 'kobo_bookmark' and bm.value.last_read:
             mi = db.get_metadata(db_id, index_is_id=True)
@@ -14,7 +14,7 @@ class AZW4Input(InputFormatPlugin):
     name = 'AZW4 Input'
     author = 'John Schember'
     description = 'Convert AZW4 to HTML'
-    file_types = set(['azw4'])
+    file_types = {'azw4'}
     commit_name = 'azw4_input'

     def convert(self, stream, options, file_ext, log,
@@ -15,7 +15,7 @@ class CHMInput(InputFormatPlugin):
     name = 'CHM Input'
    author = 'Kovid Goyal and Alex Bramley'
     description = 'Convert CHM files to OEB'
-    file_types = set(['chm'])
+    file_types = {'chm'}
     commit_name = 'chm_input'

     def _chmtohtml(self, output_dir, chm_path, no_images, log, debug_dump=False):
@@ -19,12 +19,12 @@ class ComicInput(InputFormatPlugin):
     name = 'Comic Input'
     author = 'Kovid Goyal'
     description = 'Optimize comic files (.cbz, .cbr, .cbc) for viewing on portable devices'
-    file_types = set(['cbz', 'cbr', 'cbc'])
+    file_types = {'cbz', 'cbr', 'cbc'}
     is_image_collection = True
     commit_name = 'comic_input'
     core_usage = -1

-    options = set([
+    options = {
         OptionRecommendation(name='colors', recommended_value=0,
             help=_('Reduce the number of colors used in the image. This works only'
                 ' if you choose the PNG output format. It is useful to reduce file sizes.'
@@ -71,9 +71,9 @@ class ComicInput(InputFormatPlugin):
             help=_('When converting a CBC do not add links to each page to'
                 ' the TOC. Note this only applies if the TOC has more than one'
                 ' section')),
-        ])
+        }

-    recommendations = set([
+    recommendations = {
         ('margin_left', 0, OptionRecommendation.HIGH),
         ('margin_top', 0, OptionRecommendation.HIGH),
         ('margin_right', 0, OptionRecommendation.HIGH),
@@ -88,7 +88,7 @@ class ComicInput(InputFormatPlugin):
         ('page_breaks_before', None, OptionRecommendation.HIGH),
         ('disable_font_rescaling', True, OptionRecommendation.HIGH),
         ('linearize_tables', False, OptionRecommendation.HIGH),
-        ])
+        }

     def get_comics_from_collection(self, stream):
         from calibre.libunzip import extract as zipextract
@@ -18,7 +18,7 @@ class DJVUInput(InputFormatPlugin):
     name = 'DJVU Input'
     author = 'Anthon van der Neut'
     description = 'Convert OCR-ed DJVU files (.djvu) to HTML'
-    file_types = set(['djvu', 'djv'])
+    file_types = {'djvu', 'djv'}
     commit_name = 'djvu_input'

     def convert(self, stream, options, file_ext, log, accelerators):
@@ -27,7 +27,7 @@ class DOCXInput(InputFormatPlugin):
             help=_('Render superscripts and subscripts so that they do not affect the line height.')),
     }

-    recommendations = set([('page_breaks_before', '/', OptionRecommendation.MED)])
+    recommendations = {('page_breaks_before', '/', OptionRecommendation.MED)}

     def convert(self, stream, options, file_ext, log, accelerators):
         from calibre.ebooks.docx.to_html import Convert
@@ -32,11 +32,11 @@ class EPUBInput(InputFormatPlugin):
     name = 'EPUB Input'
     author = 'Kovid Goyal'
     description = 'Convert EPUB files (.epub) to HTML'
-    file_types = set(['epub'])
+    file_types = {'epub'}
     output_encoding = None
     commit_name = 'epub_input'

-    recommendations = set([('page_breaks_before', '/', OptionRecommendation.MED)])
+    recommendations = {('page_breaks_before', '/', OptionRecommendation.MED)}

     def process_encryption(self, encfile, opf, log):
         from lxml import etree
@@ -51,7 +51,7 @@ class EPUBOutput(OutputFormatPlugin):
     commit_name = 'epub_output'
     ui_data = {'versions': ('2', '3')}

-    options = set([
+    options = {
         OptionRecommendation(name='extract_to',
             help=_('Extract the contents of the generated %s file to the '
                 'specified directory. The contents of the directory are first '
@@ -125,9 +125,9 @@ class EPUBOutput(OutputFormatPlugin):
             ' actually need it.')
         ),

-        ])
+        }

-    recommendations = set([('pretty_print', True, OptionRecommendation.HIGH)])
+    recommendations = {('pretty_print', True, OptionRecommendation.HIGH)}

     def workaround_webkit_quirks(self): # {{{
         from calibre.ebooks.oeb.base import XPath
@@ -21,19 +21,18 @@ class FB2Input(InputFormatPlugin):
     file_types = {'fb2', 'fbz'}
     commit_name = 'fb2_input'

-    recommendations = set([
+    recommendations = {
         ('level1_toc', '//h:h1', OptionRecommendation.MED),
         ('level2_toc', '//h:h2', OptionRecommendation.MED),
         ('level3_toc', '//h:h3', OptionRecommendation.MED),
-        ])
+        }

-    options = set([
+    options = {
         OptionRecommendation(name='no_inline_fb2_toc',
             recommended_value=False, level=OptionRecommendation.LOW,
             help=_('Do not insert a Table of Contents at the beginning of the book.'
                 )
-            ),
-        ])
+        )}

     def convert(self, stream, options, file_ext, log,
             accelerators):
@@ -151,7 +151,7 @@ class FB2Output(OutputFormatPlugin):
         'genres': FB2_GENRES,
     }

-    options = set([
+    options = {
         OptionRecommendation(name='sectionize',
             recommended_value='files', level=OptionRecommendation.LOW,
             choices=list(ui_data['sectionize']),
@@ -167,7 +167,7 @@ class FB2Output(OutputFormatPlugin):
             choices=FB2_GENRES,
             help=(_('Genre for the book. Choices: %s\n\n See: ') % ', '.join(FB2_GENRES)
                 ) + 'http://www.fictionbook.org/index.php/Eng:FictionBook_2.1_genres ' + _('for a complete list with descriptions.')),
-        ])
+        }

     def convert(self, oeb_book, output_path, input_plugin, opts, log):
         from calibre.ebooks.oeb.transforms.jacket import linearize_jacket
@@ -30,10 +30,10 @@ class HTMLInput(InputFormatPlugin):
     name = 'HTML Input'
     author = 'Kovid Goyal'
     description = 'Convert HTML and OPF files to an OEB'
-    file_types = set(['opf', 'html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'])
+    file_types = {'opf', 'html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'}
     commit_name = 'html_input'

-    options = set([
+    options = {
         OptionRecommendation(name='breadth_first',
             recommended_value=False, level=OptionRecommendation.LOW,
             help=_('Traverse links in HTML files breadth first. Normally, '
@@ -59,7 +59,7 @@ class HTMLInput(InputFormatPlugin):
             )
         ),

-        ])
+        }

     def convert(self, stream, opts, file_ext, log,
             accelerators):
@@ -22,7 +22,7 @@ class HTMLOutput(OutputFormatPlugin):
     file_type = 'zip'
     commit_name = 'html_output'

-    options = set([
+    options = {
         OptionRecommendation(name='template_css',
             help=_('CSS file used for the output instead of the default file')),

@@ -37,9 +37,9 @@ class HTMLOutput(OutputFormatPlugin):
             'specified directory. WARNING: The contents of the directory '
             'will be deleted.')
         ),
-        ])
+        }

-    recommendations = set([('pretty_print', True, OptionRecommendation.HIGH)])
+    recommendations = {('pretty_print', True, OptionRecommendation.HIGH)}

     def generate_toc(self, oeb_book, ref_url, output_dir):
         '''
@@ -17,7 +17,7 @@ class HTMLZInput(InputFormatPlugin):
     name = 'HTLZ Input'
     author = 'John Schember'
     description = 'Convert HTML files to HTML'
-    file_types = set(['htmlz'])
+    file_types = {'htmlz'}
     commit_name = 'htmlz_input'

     def convert(self, stream, options, file_ext, log,
@@ -14,7 +14,7 @@ class LITInput(InputFormatPlugin):
     name = 'LIT Input'
     author = 'Marshall T. Vandegrift'
     description = 'Convert LIT files to HTML'
-    file_types = set(['lit'])
+    file_types = {'lit'}
     commit_name = 'lit_input'

     def convert(self, stream, options, file_ext, log,
@@ -15,7 +15,7 @@ class LRFInput(InputFormatPlugin):
     name = 'LRF Input'
     author = 'Kovid Goyal'
     description = 'Convert LRF files to HTML'
-    file_types = set(['lrf'])
+    file_types = {'lrf'}
     commit_name = 'lrf_input'

     def convert(self, stream, options, file_ext, log,
@@ -92,7 +92,7 @@ class LRFOutput(OutputFormatPlugin):
     file_type = 'lrf'
     commit_name = 'lrf_output'

-    options = set([
+    options = {
         OptionRecommendation(name='enable_autorotation', recommended_value=False,
             help=_('Enable auto-rotation of images that are wider than the screen width.')
         ),
@@ -134,11 +134,10 @@ class LRFOutput(OutputFormatPlugin):
             help=_('The monospace family of fonts to embed')
         ),

-        ])
+        }

-    recommendations = set([
-        ('change_justification', 'original', OptionRecommendation.HIGH),
-        ])
+    recommendations = {
+        ('change_justification', 'original', OptionRecommendation.HIGH)}

     def convert_images(self, pages, opts, wide):
         from calibre.ebooks.lrf.pylrs.pylrs import Book, BookSetting, ImageStream, ImageBlock
@@ -13,7 +13,7 @@ class MOBIInput(InputFormatPlugin):
     name = 'MOBI Input'
     author = 'Kovid Goyal'
     description = 'Convert MOBI files (.mobi, .prc, .azw) to HTML'
-    file_types = set(['mobi', 'prc', 'azw', 'azw3', 'pobi'])
+    file_types = {'mobi', 'prc', 'azw', 'azw3', 'pobi'}
     commit_name = 'mobi_input'

     def convert(self, stream, options, file_ext, log,
@@ -44,7 +44,7 @@ class MOBIOutput(OutputFormatPlugin):
     commit_name = 'mobi_output'
     ui_data = {'file_types': ['old', 'both', 'new']}

-    options = set([
+    options = {
         OptionRecommendation(name='prefer_author_sort',
             recommended_value=False, level=OptionRecommendation.LOW,
             help=_('When present, use author sort field as author.')
@@ -106,7 +106,7 @@ class MOBIOutput(OutputFormatPlugin):
             'more features than MOBI 6, but only works with newer Kindles. '
             'Allowed values: {}').format('old, both, new')),

-        ])
+        }

     def check_for_periodical(self):
         if self.is_periodical:
@@ -274,7 +274,7 @@ class AZW3Output(OutputFormatPlugin):
     file_type = 'azw3'
     commit_name = 'azw3_output'

-    options = set([
+    options = {
         OptionRecommendation(name='prefer_author_sort',
             recommended_value=False, level=OptionRecommendation.LOW,
             help=_('When present, use author sort field as author.')
@@ -305,7 +305,7 @@ class AZW3Output(OutputFormatPlugin):
             ' the book will not auto sync its last read position '
             ' on multiple devices. Complain to Amazon.')
         ),
-        ])
+        }

     def convert(self, oeb, output_path, input_plugin, opts, log):
         from calibre.ebooks.mobi.writer2.resources import Resources
@@ -15,7 +15,7 @@ class ODTInput(InputFormatPlugin):
     name = 'ODT Input'
     author = 'Kovid Goyal'
     description = 'Convert ODT (OpenOffice) files to HTML'
-    file_types = set(['odt'])
+    file_types = {'odt'}
     commit_name = 'odt_input'

     def convert(self, stream, options, file_ext, log,
@@ -18,7 +18,7 @@ class OEBOutput(OutputFormatPlugin):
     file_type = 'oeb'
     commit_name = 'oeb_output'

-    recommendations = set([('pretty_print', True, OptionRecommendation.HIGH)])
+    recommendations = {('pretty_print', True, OptionRecommendation.HIGH)}

     def convert(self, oeb_book, output_path, input_plugin, opts, log):
         from urllib import unquote
@@ -14,7 +14,7 @@ class PDBInput(InputFormatPlugin):
     name = 'PDB Input'
     author = 'John Schember'
     description = 'Convert PDB to HTML'
-    file_types = set(['pdb', 'updb'])
+    file_types = {'pdb', 'updb'}
     commit_name = 'pdb_input'

     def convert(self, stream, options, file_ext, log,
@ -19,7 +19,7 @@ class PDBOutput(OutputFormatPlugin):
|
|||||||
commit_name = 'pdb_output'
|
commit_name = 'pdb_output'
|
||||||
ui_data = {'formats': tuple(ALL_FORMAT_WRITERS)}
|
ui_data = {'formats': tuple(ALL_FORMAT_WRITERS)}
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='format', recommended_value='doc',
|
OptionRecommendation(name='format', recommended_value='doc',
|
||||||
level=OptionRecommendation.LOW,
|
level=OptionRecommendation.LOW,
|
||||||
short_switch='f', choices=list(ALL_FORMAT_WRITERS),
|
short_switch='f', choices=list(ALL_FORMAT_WRITERS),
|
||||||
@ -32,7 +32,7 @@ class PDBOutput(OutputFormatPlugin):
|
|||||||
OptionRecommendation(name='inline_toc',
|
OptionRecommendation(name='inline_toc',
|
||||||
recommended_value=False, level=OptionRecommendation.LOW,
|
recommended_value=False, level=OptionRecommendation.LOW,
|
||||||
help=_('Add Table of Contents to beginning of the book.')),
|
help=_('Add Table of Contents to beginning of the book.')),
|
||||||
])
|
}
|
||||||
|
|
||||||
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
||||||
close = False
|
close = False
|
||||||
|
@ -14,10 +14,10 @@ class PDFInput(InputFormatPlugin):
|
|||||||
name = 'PDF Input'
|
name = 'PDF Input'
|
||||||
author = 'Kovid Goyal and John Schember'
|
author = 'Kovid Goyal and John Schember'
|
||||||
description = 'Convert PDF files to HTML'
|
description = 'Convert PDF files to HTML'
|
||||||
file_types = set(['pdf'])
|
file_types = {'pdf'}
|
||||||
commit_name = 'pdf_input'
|
commit_name = 'pdf_input'
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='no_images', recommended_value=False,
|
OptionRecommendation(name='no_images', recommended_value=False,
|
||||||
help=_('Do not extract images from the document')),
|
help=_('Do not extract images from the document')),
|
||||||
OptionRecommendation(name='unwrap_factor', recommended_value=0.45,
|
OptionRecommendation(name='unwrap_factor', recommended_value=0.45,
|
||||||
@ -26,7 +26,7 @@ class PDFInput(InputFormatPlugin):
|
|||||||
'default is 0.45, just below the median line length.')),
|
'default is 0.45, just below the median line length.')),
|
||||||
OptionRecommendation(name='new_pdf_engine', recommended_value=False,
|
OptionRecommendation(name='new_pdf_engine', recommended_value=False,
|
||||||
help=_('Use the new PDF conversion engine. Currently not operational.'))
|
help=_('Use the new PDF conversion engine. Currently not operational.'))
|
||||||
])
|
}
|
||||||
|
|
||||||
def convert_new(self, stream, accelerators):
|
def convert_new(self, stream, accelerators):
|
||||||
from calibre.ebooks.pdf.pdftohtml import pdftohtml
|
from calibre.ebooks.pdf.pdftohtml import pdftohtml
|
||||||
|
@ -18,7 +18,7 @@ class PMLInput(InputFormatPlugin):
|
|||||||
author = 'John Schember'
|
author = 'John Schember'
|
||||||
description = 'Convert PML to OEB'
|
description = 'Convert PML to OEB'
|
||||||
# pmlz is a zip file containing pml files and png images.
|
# pmlz is a zip file containing pml files and png images.
|
||||||
file_types = set(['pml', 'pmlz'])
|
file_types = {'pml', 'pmlz'}
|
||||||
commit_name = 'pml_input'
|
commit_name = 'pml_input'
|
||||||
|
|
||||||
def process_pml(self, pml_path, html_path, close_all=False):
|
def process_pml(self, pml_path, html_path, close_all=False):
|
||||||
|
@ -18,7 +18,7 @@ class PMLOutput(OutputFormatPlugin):
|
|||||||
file_type = 'pmlz'
|
file_type = 'pmlz'
|
||||||
commit_name = 'pml_output'
|
commit_name = 'pml_output'
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='pml_output_encoding', recommended_value='cp1252',
|
OptionRecommendation(name='pml_output_encoding', recommended_value='cp1252',
|
||||||
level=OptionRecommendation.LOW,
|
level=OptionRecommendation.LOW,
|
||||||
help=_('Specify the character encoding of the output document. '
|
help=_('Specify the character encoding of the output document. '
|
||||||
@ -32,7 +32,7 @@ class PMLOutput(OutputFormatPlugin):
|
|||||||
'have their size and depth reduced by default to accommodate '
|
'have their size and depth reduced by default to accommodate '
|
||||||
'applications that can not convert images on their '
|
'applications that can not convert images on their '
|
||||||
'own such as Dropbook.')),
|
'own such as Dropbook.')),
|
||||||
])
|
}
|
||||||
|
|
||||||
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
||||||
from calibre.ebooks.pml.pmlml import PMLMLizer
|
from calibre.ebooks.pml.pmlml import PMLMLizer
|
||||||
|
@ -14,7 +14,7 @@ class RBInput(InputFormatPlugin):
|
|||||||
name = 'RB Input'
|
name = 'RB Input'
|
||||||
author = 'John Schember'
|
author = 'John Schember'
|
||||||
description = 'Convert RB files to HTML'
|
description = 'Convert RB files to HTML'
|
||||||
file_types = set(['rb'])
|
file_types = {'rb'}
|
||||||
commit_name = 'rb_input'
|
commit_name = 'rb_input'
|
||||||
|
|
||||||
def convert(self, stream, options, file_ext, log,
|
def convert(self, stream, options, file_ext, log,
|
||||||
|
@ -16,11 +16,10 @@ class RBOutput(OutputFormatPlugin):
|
|||||||
file_type = 'rb'
|
file_type = 'rb'
|
||||||
commit_name = 'rb_output'
|
commit_name = 'rb_output'
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='inline_toc',
|
OptionRecommendation(name='inline_toc',
|
||||||
recommended_value=False, level=OptionRecommendation.LOW,
|
recommended_value=False, level=OptionRecommendation.LOW,
|
||||||
help=_('Add Table of Contents to beginning of the book.')),
|
help=_('Add Table of Contents to beginning of the book.'))}
|
||||||
])
|
|
||||||
|
|
||||||
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
||||||
from calibre.ebooks.rb.writer import RBWriter
|
from calibre.ebooks.rb.writer import RBWriter
|
||||||
|
@ -22,10 +22,10 @@ class RecipeInput(InputFormatPlugin):
|
|||||||
name = 'Recipe Input'
|
name = 'Recipe Input'
|
||||||
author = 'Kovid Goyal'
|
author = 'Kovid Goyal'
|
||||||
description = _('Download periodical content from the internet')
|
description = _('Download periodical content from the internet')
|
||||||
file_types = set(['recipe', 'downloaded_recipe'])
|
file_types = {'recipe', 'downloaded_recipe'}
|
||||||
commit_name = 'recipe_input'
|
commit_name = 'recipe_input'
|
||||||
|
|
||||||
recommendations = set([
|
recommendations = {
|
||||||
('chapter', None, OptionRecommendation.HIGH),
|
('chapter', None, OptionRecommendation.HIGH),
|
||||||
('dont_split_on_page_breaks', True, OptionRecommendation.HIGH),
|
('dont_split_on_page_breaks', True, OptionRecommendation.HIGH),
|
||||||
('use_auto_toc', False, OptionRecommendation.HIGH),
|
('use_auto_toc', False, OptionRecommendation.HIGH),
|
||||||
@ -33,9 +33,9 @@ class RecipeInput(InputFormatPlugin):
|
|||||||
('input_profile', 'default', OptionRecommendation.HIGH),
|
('input_profile', 'default', OptionRecommendation.HIGH),
|
||||||
('page_breaks_before', None, OptionRecommendation.HIGH),
|
('page_breaks_before', None, OptionRecommendation.HIGH),
|
||||||
('insert_metadata', False, OptionRecommendation.HIGH),
|
('insert_metadata', False, OptionRecommendation.HIGH),
|
||||||
])
|
}
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='test', recommended_value=False,
|
OptionRecommendation(name='test', recommended_value=False,
|
||||||
help=_(
|
help=_(
|
||||||
'Useful for recipe development. Forces'
|
'Useful for recipe development. Forces'
|
||||||
@ -53,7 +53,7 @@ class RecipeInput(InputFormatPlugin):
|
|||||||
help=_('Do not download latest version of builtin recipes from the calibre server')),
|
help=_('Do not download latest version of builtin recipes from the calibre server')),
|
||||||
OptionRecommendation(name='lrf', recommended_value=False,
|
OptionRecommendation(name='lrf', recommended_value=False,
|
||||||
help='Optimize fetching for subsequent conversion to LRF.'),
|
help='Optimize fetching for subsequent conversion to LRF.'),
|
||||||
])
|
}
|
||||||
|
|
||||||
def convert(self, recipe_or_file, opts, file_ext, log,
|
def convert(self, recipe_or_file, opts, file_ext, log,
|
||||||
accelerators):
|
accelerators):
|
||||||
|
@ -43,7 +43,7 @@ class RTFInput(InputFormatPlugin):
|
|||||||
name = 'RTF Input'
|
name = 'RTF Input'
|
||||||
author = 'Kovid Goyal'
|
author = 'Kovid Goyal'
|
||||||
description = 'Convert RTF files to HTML'
|
description = 'Convert RTF files to HTML'
|
||||||
file_types = set(['rtf'])
|
file_types = {'rtf'}
|
||||||
commit_name = 'rtf_input'
|
commit_name = 'rtf_input'
|
||||||
|
|
||||||
options = {
|
options = {
|
||||||
|
@ -22,7 +22,7 @@ class SNBInput(InputFormatPlugin):
|
|||||||
name = 'SNB Input'
|
name = 'SNB Input'
|
||||||
author = 'Li Fanxi'
|
author = 'Li Fanxi'
|
||||||
description = 'Convert SNB files to OEB'
|
description = 'Convert SNB files to OEB'
|
||||||
file_types = set(['snb'])
|
file_types = {'snb'}
|
||||||
commit_name = 'snb_input'
|
commit_name = 'snb_input'
|
||||||
|
|
||||||
options = set([
|
options = set([
|
||||||
|
@ -18,7 +18,7 @@ class SNBOutput(OutputFormatPlugin):
|
|||||||
file_type = 'snb'
|
file_type = 'snb'
|
||||||
commit_name = 'snb_output'
|
commit_name = 'snb_output'
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='snb_output_encoding', recommended_value='utf-8',
|
OptionRecommendation(name='snb_output_encoding', recommended_value='utf-8',
|
||||||
level=OptionRecommendation.LOW,
|
level=OptionRecommendation.LOW,
|
||||||
help=_('Specify the character encoding of the output document. '
|
help=_('Specify the character encoding of the output document. '
|
||||||
@ -45,7 +45,7 @@ class SNBOutput(OutputFormatPlugin):
|
|||||||
OptionRecommendation(name='snb_full_screen',
|
OptionRecommendation(name='snb_full_screen',
|
||||||
recommended_value=False, level=OptionRecommendation.LOW,
|
recommended_value=False, level=OptionRecommendation.LOW,
|
||||||
help=_('Resize all the images for full screen view. ')),
|
help=_('Resize all the images for full screen view. ')),
|
||||||
])
|
}
|
||||||
|
|
||||||
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
|
@ -14,7 +14,7 @@ class TCRInput(InputFormatPlugin):
|
|||||||
name = 'TCR Input'
|
name = 'TCR Input'
|
||||||
author = 'John Schember'
|
author = 'John Schember'
|
||||||
description = 'Convert TCR files to HTML'
|
description = 'Convert TCR files to HTML'
|
||||||
file_types = set(['tcr'])
|
file_types = {'tcr'}
|
||||||
commit_name = 'tcr_input'
|
commit_name = 'tcr_input'
|
||||||
|
|
||||||
def convert(self, stream, options, file_ext, log, accelerators):
|
def convert(self, stream, options, file_ext, log, accelerators):
|
||||||
|
@ -17,12 +17,11 @@ class TCROutput(OutputFormatPlugin):
|
|||||||
file_type = 'tcr'
|
file_type = 'tcr'
|
||||||
commit_name = 'tcr_output'
|
commit_name = 'tcr_output'
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='tcr_output_encoding', recommended_value='utf-8',
|
OptionRecommendation(name='tcr_output_encoding', recommended_value='utf-8',
|
||||||
level=OptionRecommendation.LOW,
|
level=OptionRecommendation.LOW,
|
||||||
help=_('Specify the character encoding of the output document. '
|
help=_('Specify the character encoding of the output document. '
|
||||||
'The default is utf-8.')),
|
'The default is utf-8.'))}
|
||||||
])
|
|
||||||
|
|
||||||
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
||||||
from calibre.ebooks.txt.txtml import TXTMLizer
|
from calibre.ebooks.txt.txtml import TXTMLizer
|
||||||
|
@ -52,7 +52,7 @@ class TXTInput(InputFormatPlugin):
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='formatting_type', recommended_value='auto',
|
OptionRecommendation(name='formatting_type', recommended_value='auto',
|
||||||
choices=list(ui_data['formatting_types']),
|
choices=list(ui_data['formatting_types']),
|
||||||
help=_('Formatting used within the document.\n'
|
help=_('Formatting used within the document.\n'
|
||||||
@ -87,7 +87,7 @@ class TXTInput(InputFormatPlugin):
|
|||||||
'To learn more about markdown extensions, see {}\n'
|
'To learn more about markdown extensions, see {}\n'
|
||||||
'This should be a comma separated list of extensions to enable:\n'
|
'This should be a comma separated list of extensions to enable:\n'
|
||||||
).format('https://python-markdown.github.io/extensions/') + '\n'.join('* %s: %s' % (k, MD_EXTENSIONS[k]) for k in sorted(MD_EXTENSIONS))),
|
).format('https://python-markdown.github.io/extensions/') + '\n'.join('* %s: %s' % (k, MD_EXTENSIONS[k]) for k in sorted(MD_EXTENSIONS))),
|
||||||
])
|
}
|
||||||
|
|
||||||
def shift_file(self, base_dir, fname, data):
|
def shift_file(self, base_dir, fname, data):
|
||||||
name, ext = os.path.splitext(fname)
|
name, ext = os.path.splitext(fname)
|
||||||
|
@ -30,7 +30,7 @@ class TXTOutput(OutputFormatPlugin):
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
options = set([
|
options = {
|
||||||
OptionRecommendation(name='newline', recommended_value='system',
|
OptionRecommendation(name='newline', recommended_value='system',
|
||||||
level=OptionRecommendation.LOW,
|
level=OptionRecommendation.LOW,
|
||||||
short_switch='n', choices=NEWLINE_TYPES,
|
short_switch='n', choices=NEWLINE_TYPES,
|
||||||
@ -80,7 +80,7 @@ class TXTOutput(OutputFormatPlugin):
|
|||||||
'formatting that supports setting font color. If this option is '
|
'formatting that supports setting font color. If this option is '
|
||||||
'not specified font color will not be set and default to the '
|
'not specified font color will not be set and default to the '
|
||||||
'color displayed by the reader (generally this is black).')),
|
'color displayed by the reader (generally this is black).')),
|
||||||
])
|
}
|
||||||
|
|
||||||
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
def convert(self, oeb_book, output_path, input_plugin, opts, log):
|
||||||
from calibre.ebooks.txt.txtml import TXTMLizer
|
from calibre.ebooks.txt.txtml import TXTMLizer
|
||||||
|
@ -22,7 +22,7 @@ file containing all linked files. This plugin is run \
|
|||||||
every time you add an HTML file to the library.\
|
every time you add an HTML file to the library.\
|
||||||
'''))
|
'''))
|
||||||
version = numeric_version
|
version = numeric_version
|
||||||
file_types = set(['html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'])
|
file_types = {'html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'}
|
||||||
supported_platforms = ['windows', 'osx', 'linux']
|
supported_platforms = ['windows', 'osx', 'linux']
|
||||||
on_import = True
|
on_import = True
|
||||||
|
|
||||||
|
@ -34,7 +34,7 @@ import calibre.ebooks.lit.mssha1 as mssha1
|
|||||||
|
|
||||||
__all__ = ['LitWriter']
|
__all__ = ['LitWriter']
|
||||||
|
|
||||||
LIT_IMAGES = set(['image/png', 'image/jpeg', 'image/gif'])
|
LIT_IMAGES = {'image/png', 'image/jpeg', 'image/gif'}
|
||||||
LIT_MIMES = OEB_DOCS | OEB_STYLES | LIT_IMAGES
|
LIT_MIMES = OEB_DOCS | OEB_STYLES | LIT_IMAGES
|
||||||
|
|
||||||
MS_COVER_TYPE = 'other.ms-coverimage-standard'
|
MS_COVER_TYPE = 'other.ms-coverimage-standard'
|
||||||
@ -115,7 +115,7 @@ LZXC_CONTROL = \
|
|||||||
|
|
||||||
COLLAPSE = re.compile(r'[ \t\r\n\v]+')
|
COLLAPSE = re.compile(r'[ \t\r\n\v]+')
|
||||||
|
|
||||||
PAGE_BREAKS = set(['always', 'left', 'right'])
|
PAGE_BREAKS = {'always', 'left', 'right'}
|
||||||
|
|
||||||
|
|
||||||
def decint(value):
|
def decint(value):
|
||||||
|
@ -820,7 +820,7 @@ class Text(LRFStream):
|
|||||||
text_tags = set(list(TextAttr.tag_map.keys()) +
|
text_tags = set(list(TextAttr.tag_map.keys()) +
|
||||||
list(Text.text_tags.keys()) +
|
list(Text.text_tags.keys()) +
|
||||||
list(ruby_tags.keys()))
|
list(ruby_tags.keys()))
|
||||||
text_tags -= set([0xf500+i for i in range(10)])
|
text_tags -= {0xf500+i for i in range(10)}
|
||||||
text_tags.add(0xf5cc)
|
text_tags.add(0xf5cc)
|
||||||
|
|
||||||
while stream.tell() < length:
|
while stream.tell() < length:
|
||||||
|
@ -13,9 +13,9 @@ from calibre.customize import FileTypePlugin
|
|||||||
|
|
||||||
|
|
||||||
def is_comic(list_of_names):
|
def is_comic(list_of_names):
|
||||||
extensions = set([x.rpartition('.')[-1].lower() for x in list_of_names
|
extensions = {x.rpartition('.')[-1].lower() for x in list_of_names
|
||||||
if '.' in x and x.lower().rpartition('/')[-1] != 'thumbs.db'])
|
if '.' in x and x.lower().rpartition('/')[-1] != 'thumbs.db'}
|
||||||
comic_extensions = set(['jpg', 'jpeg', 'png'])
|
comic_extensions = {'jpg', 'jpeg', 'png'}
|
||||||
return len(extensions - comic_extensions) == 0
|
return len(extensions - comic_extensions) == 0
|
||||||
|
|
||||||
|
|
||||||
@ -44,7 +44,7 @@ class ArchiveExtract(FileTypePlugin):
|
|||||||
description = _('Extract common e-book formats from archive files '
|
description = _('Extract common e-book formats from archive files '
|
||||||
'(ZIP/RAR). Also try to autodetect if they are actually '
|
'(ZIP/RAR). Also try to autodetect if they are actually '
|
||||||
'CBZ/CBR files.')
|
'CBZ/CBR files.')
|
||||||
file_types = set(['zip', 'rar'])
|
file_types = {'zip', 'rar'}
|
||||||
supported_platforms = ['windows', 'osx', 'linux']
|
supported_platforms = ['windows', 'osx', 'linux']
|
||||||
on_import = True
|
on_import = True
|
||||||
|
|
||||||
|
@ -81,7 +81,7 @@ class Ozon(Source):
|
|||||||
|
|
||||||
ozonid = identifiers.get('ozon', None)
|
ozonid = identifiers.get('ozon', None)
|
||||||
|
|
||||||
qItems = set([ozonid, isbn])
|
qItems = {ozonid, isbn}
|
||||||
|
|
||||||
# Added Russian variant of 'Unknown'
|
# Added Russian variant of 'Unknown'
|
||||||
unk = [_('Unknown').upper(), 'Неизв.'.upper(), icu_upper('Неизв.')]
|
unk = [_('Unknown').upper(), 'Неизв.'.upper(), icu_upper('Неизв.')]
|
||||||
|
@ -47,10 +47,10 @@ def title_test(title, exact=False):
|
|||||||
|
|
||||||
|
|
||||||
def authors_test(authors):
|
def authors_test(authors):
|
||||||
authors = set([x.lower() for x in authors])
|
authors = {x.lower() for x in authors}
|
||||||
|
|
||||||
def test(mi):
|
def test(mi):
|
||||||
au = set([x.lower() for x in mi.authors])
|
au = {x.lower() for x in mi.authors}
|
||||||
if msprefs['swap_author_names']:
|
if msprefs['swap_author_names']:
|
||||||
def revert_to_fn_ln(a):
|
def revert_to_fn_ln(a):
|
||||||
if ',' not in a:
|
if ',' not in a:
|
||||||
@ -61,7 +61,7 @@ def authors_test(authors):
|
|||||||
parts.insert(0, t)
|
parts.insert(0, t)
|
||||||
return ' '.join(parts)
|
return ' '.join(parts)
|
||||||
|
|
||||||
au = set([revert_to_fn_ln(x) for x in au])
|
au = {revert_to_fn_ln(x) for x in au}
|
||||||
|
|
||||||
if au == authors:
|
if au == authors:
|
||||||
return True
|
return True
|
||||||
@ -72,10 +72,10 @@ def authors_test(authors):
|
|||||||
|
|
||||||
|
|
||||||
def tags_test(tags):
|
def tags_test(tags):
|
||||||
tags = set([x.lower() for x in tags])
|
tags = {x.lower() for x in tags}
|
||||||
|
|
||||||
def test(mi):
|
def test(mi):
|
||||||
t = set([x.lower() for x in mi.tags])
|
t = {x.lower() for x in mi.tags}
|
||||||
if t == tags:
|
if t == tags:
|
||||||
return True
|
return True
|
||||||
prints('Tags test failed. Expected: \'%s\' found \'%s\''%(tags, t))
|
prints('Tags test failed. Expected: \'%s\' found \'%s\''%(tags, t))
|
||||||
|
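As an illustrative aside (not part of the commit itself): the hunks above also convert the comprehension form, where set([... for ...]) first builds a throwaway list and then copies it into a set, while {... for ...} is a direct set comprehension. A short sketch with made-up data:

    # The two forms produce equal sets; the comprehension avoids the
    # intermediate list that set([...]) has to allocate.
    authors = ['Jane Austen', 'JANE AUSTEN', 'Leo Tolstoy']
    lowered_list_style = set([x.lower() for x in authors])
    lowered_comprehension = {x.lower() for x in authors}
    assert lowered_list_style == lowered_comprehension == {'jane austen', 'leo tolstoy'}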
@@ -26,19 +26,19 @@ def MBP(name):

 MOBI_NSMAP = {None: XHTML_NS, 'mbp': MBP_NS}
 INLINE_TAGS = {'span', 'a', 'code', 'u', 's', 'big', 'strike', 'tt', 'font', 'q', 'i', 'b', 'em', 'strong', 'sup', 'sub'}
-HEADER_TAGS = set(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
+HEADER_TAGS = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'}
 # GR: Added 'caption' to both sets
-NESTABLE_TAGS = set(['ol', 'ul', 'li', 'table', 'tr', 'td', 'th', 'caption'])
+NESTABLE_TAGS = {'ol', 'ul', 'li', 'table', 'tr', 'td', 'th', 'caption'}
-TABLE_TAGS = set(['table', 'tr', 'td', 'th', 'caption'])
+TABLE_TAGS = {'table', 'tr', 'td', 'th', 'caption'}

-SPECIAL_TAGS = set(['hr', 'br'])
+SPECIAL_TAGS = {'hr', 'br'}
-CONTENT_TAGS = set(['img', 'hr', 'br'])
+CONTENT_TAGS = {'img', 'hr', 'br'}

 NOT_VTAGS = HEADER_TAGS | NESTABLE_TAGS | TABLE_TAGS | SPECIAL_TAGS | \
 CONTENT_TAGS
-LEAF_TAGS = set(['base', 'basefont', 'frame', 'link', 'meta', 'area', 'br',
+LEAF_TAGS = {'base', 'basefont', 'frame', 'link', 'meta', 'area', 'br',
-'col', 'hr', 'img', 'input', 'param'])
+'col', 'hr', 'img', 'input', 'param'}
-PAGE_BREAKS = set(['always', 'left', 'right'])
+PAGE_BREAKS = {'always', 'left', 'right'}

 COLLAPSE = re.compile(r'[ \t\r\n\v]+')

@@ -354,8 +354,8 @@ class TBS(object): # {{{

 if spanner is None:
 articles = depth_map[2]
-sections = set([self.section_map[a.parent_index] for a in
+sections = {self.section_map[a.parent_index] for a in
-articles])
+articles}
 sections = sorted(sections, key=lambda x:x.offset)
 section_map = {s:[a for a in articles if a.parent_index ==
 s.index] for s in sections}
@@ -27,11 +27,11 @@ XML_NS = 'http://www.w3.org/XML/1998/namespace'
 OEB_DOC_NS = 'http://openebook.org/namespaces/oeb-document/1.0/'
 OPF1_NS = 'http://openebook.org/namespaces/oeb-package/1.0/'
 OPF2_NS = 'http://www.idpf.org/2007/opf'
-OPF_NSES = set([OPF1_NS, OPF2_NS])
+OPF_NSES = {OPF1_NS, OPF2_NS}
 DC09_NS = 'http://purl.org/metadata/dublin_core'
 DC10_NS = 'http://purl.org/dc/elements/1.0/'
 DC11_NS = 'http://purl.org/dc/elements/1.1/'
-DC_NSES = set([DC09_NS, DC10_NS, DC11_NS])
+DC_NSES = {DC09_NS, DC10_NS, DC11_NS}
 XSI_NS = 'http://www.w3.org/2001/XMLSchema-instance'
 DCTERMS_NS = 'http://purl.org/dc/terms/'
 NCX_NS = 'http://www.daisy.org/z3986/2005/ncx/'
@@ -297,11 +297,11 @@ BINARY_MIME = 'application/octet-stream'

 XHTML_CSS_NAMESPACE = u'@namespace "%s";\n' % XHTML_NS

-OEB_STYLES = set([CSS_MIME, OEB_CSS_MIME, 'text/x-oeb-css', 'xhtml/css'])
+OEB_STYLES = {CSS_MIME, OEB_CSS_MIME, 'text/x-oeb-css', 'xhtml/css'}
-OEB_DOCS = set([XHTML_MIME, 'text/html', OEB_DOC_MIME,
+OEB_DOCS = {XHTML_MIME, 'text/html', OEB_DOC_MIME,
-'text/x-oeb-document'])
+'text/x-oeb-document'}
-OEB_RASTER_IMAGES = set([GIF_MIME, JPEG_MIME, PNG_MIME])
+OEB_RASTER_IMAGES = {GIF_MIME, JPEG_MIME, PNG_MIME}
-OEB_IMAGES = set([GIF_MIME, JPEG_MIME, PNG_MIME, SVG_MIME])
+OEB_IMAGES = {GIF_MIME, JPEG_MIME, PNG_MIME, SVG_MIME}

 MS_COVER_TYPE = 'other.ms-coverimage-standard'

@@ -617,12 +617,12 @@ class Metadata(object):
 metadata items.
 """

-DC_TERMS = set(['contributor', 'coverage', 'creator', 'date',
+DC_TERMS = {'contributor', 'coverage', 'creator', 'date',
 'description', 'format', 'identifier', 'language',
 'publisher', 'relation', 'rights', 'source',
-'subject', 'title', 'type'])
+'subject', 'title', 'type'}
-CALIBRE_TERMS = set(['series', 'series_index', 'rating', 'timestamp',
+CALIBRE_TERMS = {'series', 'series_index', 'rating', 'timestamp',
-'publication_type', 'title_sort'])
+'publication_type', 'title_sort'}
 OPF_ATTRS = {'role': OPF('role'), 'file-as': OPF('file-as'),
 'scheme': OPF('scheme'), 'event': OPF('event'),
 'type': XSI('type'), 'lang': XML('lang'), 'id': 'id'}
@@ -1199,7 +1199,7 @@ class Manifest(object):
 href = urlnormalize(href)
 base, ext = os.path.splitext(href)
 index = 1
-lhrefs = set([x.lower() for x in self.hrefs])
+lhrefs = {x.lower() for x in self.hrefs}
 while href.lower() in lhrefs:
 href = base + str(index) + ext
 index += 1
@@ -1704,7 +1704,7 @@ class PageList(object):
 :attr:`klass`: Optional semantic class of this page.
 :attr:`id`: Optional unique identifier for this page.
 """
-TYPES = set(['front', 'normal', 'special'])
+TYPES = {'front', 'normal', 'special'}

 def __init__(self, name, href, type='normal', klass=None, id=None):
 self.name = unicode(name)
@@ -19,7 +19,7 @@ CASE_MANGLER_CSS = """
 }
 """

-TEXT_TRANSFORMS = set(['capitalize', 'uppercase', 'lowercase'])
+TEXT_TRANSFORMS = {'capitalize', 'uppercase', 'lowercase'}


 class CaseMangler(object):
@@ -19,8 +19,8 @@ from calibre.ebooks.oeb.stylizer import Stylizer
 from calibre.ptempfile import PersistentTemporaryFile
 from calibre.utils.imghdr import what

-IMAGE_TAGS = set([XHTML('img'), XHTML('object')])
+IMAGE_TAGS = {XHTML('img'), XHTML('object')}
-KEEP_ATTRS = set(['class', 'style', 'width', 'height', 'align'])
+KEEP_ATTRS = {'class', 'style', 'width', 'height', 'align'}


 class Unavailable(Exception):
@@ -514,7 +514,7 @@ class AddAction(InterfaceAction):
 ans = os.path.splitext(x)[1]
 ans = ans[1:] if len(ans) > 1 else ans
 return ans.lower()
-remove = set([p for p in paths if ext(p) in ve])
+remove = {p for p in paths if ext(p) in ve}
 if remove:
 paths = [p for p in paths if p not in remove]
 vmsg = getattr(self.gui.device_manager.device, 'VIRTUAL_BOOK_EXTENSION_MESSAGE', None) or _(
@@ -90,7 +90,7 @@ class FetchAnnotationsAction(InterfaceAction):
 paths = []
 for x in ('memory', 'card_a', 'card_b'):
 x = getattr(self.gui, x+'_view').model()
-paths += x.paths_for_db_ids(set([id_]), as_map=True)[id_]
+paths += x.paths_for_db_ids({id_}, as_map=True)[id_]
 return paths[0].path if paths else None

 def generate_annotation_paths(ids, db, device):
@@ -250,8 +250,8 @@ class Worker(Thread): # {{{

 if gprefs['automerge'] == 'new record':
 incoming_fmts = \
-set([os.path.splitext(path)[-1].replace('.',
+{os.path.splitext(path)[-1].replace('.',
-'').upper() for path in paths])
+'').upper() for path in paths}

 if incoming_fmts.intersection(seen_fmts):
 # There was at least one duplicate format
@@ -222,7 +222,7 @@ class DeleteAction(InterfaceAction):
 bfmts = m.db.formats(id, index_is_id=True)
 if bfmts is None:
 continue
-bfmts = set([x.lower() for x in bfmts.split(',')])
+bfmts = {x.lower() for x in bfmts.split(',')}
 rfmts = bfmts - set(fmts)
 if bfmts - rfmts:
 # Do not delete if it will leave the book with no
@@ -259,7 +259,7 @@ class ViewAction(InterfaceAction):
 except:
 error_dialog(self.gui, _('Cannot view'),
 _('This book no longer exists in your library'), show=True)
-self.update_history([], remove=set([id_]))
+self.update_history([], remove={id_})
 continue

 title = db.title(id_, index_is_id=True)
@@ -25,7 +25,7 @@ class PluginWidget(QWidget, Ui_Form):
 ]

 sync_enabled = False
-formats = set(['bib'])
+formats = {'bib'}

 def __init__(self, parent=None):
 QWidget.__init__(self, parent)
@@ -38,7 +38,7 @@ class PluginWidget(QWidget,Ui_Form):
 sync_enabled = True

 # Formats supported by this plugin
-formats = set(['azw3','epub','mobi'])
+formats = {'azw3','epub','mobi'}

 def __init__(self, parent=None):
 QWidget.__init__(self, parent)
@@ -62,8 +62,8 @@ class LookAndFeelWidget(Widget, Ui_Form):
 w = getattr(self, 'filter_css_%s'%key)
 if w.isChecked():
 ans = ans.union(item)
-ans = ans.union(set([x.strip().lower() for x in
+ans = ans.union({x.strip().lower() for x in
-unicode(self.filter_css_others.text()).split(',')]))
+unicode(self.filter_css_others.text()).split(',')})
 return ','.join(ans) if ans else None
 if g is self.opt_font_size_mapping:
 val = unicode(g.text()).strip()
@@ -1334,10 +1334,10 @@ class BulkText(BulkBase):
 else:
 txt = rtext
 if txt:
-remove = set([v.strip() for v in txt.split(ism['ui_to_list'])])
+remove = {v.strip() for v in txt.split(ism['ui_to_list'])}
 txt = adding
 if txt:
-add = set([v.strip() for v in txt.split(ism['ui_to_list'])])
+add = {v.strip() for v in txt.split(ism['ui_to_list'])}
 else:
 add = set()
 self.db.set_custom_bulk_multiple(book_ids, add=add,
@@ -353,9 +353,9 @@ class CheckLibraryDialog(QDialog):
 item = tl.child(i)
 id = int(item.data(0, Qt.UserRole))
 all = self.db.formats(id, index_is_id=True, verify_formats=False)
-all = set([f.strip() for f in all.split(',')]) if all else set()
+all = {f.strip() for f in all.split(',')} if all else set()
 valid = self.db.formats(id, index_is_id=True, verify_formats=True)
-valid = set([f.strip() for f in valid.split(',')]) if valid else set()
+valid = {f.strip() for f in valid.split(',')} if valid else set()
 for fmt in all-valid:
 self.db.remove_format(id, fmt, index_is_id=True, db_only=True)

@@ -351,8 +351,8 @@ class JobManager(QAbstractTableModel, AdaptSQP): # {{{
 self._kill_job(job)

 def universal_set(self):
-return set([i for i, j in enumerate(self.jobs) if not getattr(j,
+return {i for i, j in enumerate(self.jobs) if not getattr(j,
-'hidden_in_gui', False)])
+'hidden_in_gui', False)}

 def get_matches(self, location, query, candidates=None):
 if candidates is None:
@@ -641,7 +641,7 @@ class BooksModel(QAbstractTableModel): # {{{
 if not fmts:
 fmts = ''
 db_formats = set(fmts.lower().split(','))
-available_formats = set([f.lower() for f in formats])
+available_formats = {f.lower() for f in formats}
 u = available_formats.intersection(db_formats)
 for f in formats:
 if f.lower() in u:
@@ -697,7 +697,7 @@ class BooksModel(QAbstractTableModel): # {{{
 if not fmts:
 fmts = ''
 db_formats = set(fmts.lower().split(','))
-available_formats = set([f.lower() for f in formats])
+available_formats = {f.lower() for f in formats}
 u = available_formats.intersection(db_formats)
 for f in formats:
 if f.lower() in u:
@@ -1104,7 +1104,7 @@ class BooksModel(QAbstractTableModel): # {{{
 return True

 id = self.db.id(row)
-books_to_refresh = set([id])
+books_to_refresh = {id}
 books_to_refresh |= self.db.set_custom(id, val, extra=s_index,
 label=label, num=None, append=False, notify=True,
 allow_case_change=True)
@@ -1153,7 +1153,7 @@ class BooksModel(QAbstractTableModel): # {{{
 value if column in ('timestamp', 'pubdate')
 else re.sub(u'\\s', u' ', unicode(value or '').strip()))
 id = self.db.id(row)
-books_to_refresh = set([id])
+books_to_refresh = {id}
 if column == 'rating':
 val = max(0, min(int(val or 0), 10))
 self.db.set_rating(id, val)
@@ -1243,7 +1243,7 @@ class OnDeviceSearch(SearchQueryParser): # {{{
 if location not in self.USABLE_LOCATIONS:
 return set([])
 matches = set([])
-all_locs = set(self.USABLE_LOCATIONS) - set(['all', 'tags'])
+all_locs = set(self.USABLE_LOCATIONS) - {'all', 'tags'}
 locations = all_locs if location == 'all' else [location]
 q = {
 'title' : lambda x : getattr(x, 'title').lower(),
@@ -1114,8 +1114,8 @@ class BooksView(QTableView): # {{{
 Select rows identified by identifiers. identifiers can be a set of ids,
 row numbers or QModelIndexes.
 '''
-rows = set([x.row() if hasattr(x, 'row') else x for x in
+rows = {x.row() if hasattr(x, 'row') else x for x in
-identifiers])
+identifiers}
 if using_ids:
 rows = set([])
 identifiers = set(identifiers)
@@ -911,8 +911,8 @@ class FormatsManager(QWidget):
 db.add_format(id_, ext, spool, notify=False,
 index_is_id=True)
 dbfmts = db.formats(id_, index_is_id=True)
-db_extensions = set([fl.lower() for fl in (dbfmts.split(',') if dbfmts
+db_extensions = {fl.lower() for fl in (dbfmts.split(',') if dbfmts
-else [])])
+else [])}
 extensions = new_extensions.union(old_extensions)
 for ext in db_extensions:
 if ext not in extensions and ext in self.original_val:
@@ -45,10 +45,10 @@ class FieldsModel(FM): # {{{
 self.endResetModel()

 def commit(self):
-ignored_fields = set([x for x in self.prefs['ignore_fields'] if x not in
+ignored_fields = {x for x in self.prefs['ignore_fields'] if x not in
-self.overrides])
+self.overrides}
-changed = set([k for k, v in self.overrides.iteritems() if v ==
+changed = {k for k, v in self.overrides.iteritems() if v ==
-Qt.Unchecked])
+Qt.Unchecked}
 self.prefs['ignore_fields'] = list(ignored_fields.union(changed))

 # }}}
@@ -140,7 +140,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
 input_map = prefs['input_format_order']
 all_formats = set()
 self.opt_input_order.clear()
-for fmt in all_input_formats().union(set(['ZIP', 'RAR'])):
+for fmt in all_input_formats().union({'ZIP', 'RAR'}):
 all_formats.add(fmt.upper())
 for format in input_map + list(all_formats.difference(input_map)):
 item = QListWidgetItem(format, self.opt_input_order)
@@ -239,9 +239,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
 if not config_cols:
 config_cols = ['title']
 removed_cols = set(model.column_map) - set(config_cols)
-hidden_cols = set([unicode(self.opt_columns.item(i, 0).data(Qt.UserRole) or '')
+hidden_cols = {unicode(self.opt_columns.item(i, 0).data(Qt.UserRole) or '')
 for i in range(self.opt_columns.rowCount())
-if self.opt_columns.item(i, 0).checkState()==Qt.Unchecked])
+if self.opt_columns.item(i, 0).checkState()==Qt.Unchecked}
 hidden_cols = hidden_cols.union(removed_cols) # Hide removed cols
 hidden_cols = list(hidden_cols.intersection(set(model.column_map)))
 if 'ondevice' in hidden_cols:
@@ -479,14 +479,14 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
 r('tags_browser_collapse_at', gprefs)
 r('tag_browser_dont_collapse', gprefs, setting=CommaSeparatedList)

-choices = set([k for k in db.field_metadata.all_field_keys()
+choices = {k for k in db.field_metadata.all_field_keys()
 if (db.field_metadata[k]['is_category'] and (
 db.field_metadata[k]['datatype'] in ['text', 'series', 'enumeration'
 ]) and not db.field_metadata[k]['display'].get('is_names', False)) or (
 db.field_metadata[k]['datatype'] in ['composite'
-] and db.field_metadata[k]['display'].get('make_category', False))])
+] and db.field_metadata[k]['display'].get('make_category', False))}
-choices -= set(['authors', 'publisher', 'formats', 'news', 'identifiers'])
+choices -= {'authors', 'publisher', 'formats', 'news', 'identifiers'}
-choices |= set(['search'])
+choices |= {'search'}
 self.opt_categories_using_hierarchy.update_items_cache(choices)
 r('categories_using_hierarchy', db.prefs, setting=CommaSeparatedList,
 choices=sorted(list(choices), key=sort_key))
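As an illustrative aside (not part of the commit itself): set literals also combine directly with the set operators used in the preference-handling hunks above, such as difference and in-place union. A small sketch with hypothetical category names:

    choices = {'authors', 'tags', 'news', 'series'}
    choices -= {'authors', 'news'}   # difference with a literal
    choices |= {'search'}            # in-place union with a literal
    assert choices == {'tags', 'series', 'search'}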
@ -233,10 +233,10 @@ class FieldsModel(QAbstractListModel): # {{{
|
|||||||
return ret
|
return ret
|
||||||
|
|
||||||
def commit(self):
|
def commit(self):
|
||||||
ignored_fields = set([x for x in msprefs['ignore_fields'] if x not in
|
ignored_fields = {x for x in msprefs['ignore_fields'] if x not in
|
||||||
self.overrides])
|
self.overrides}
|
||||||
changed = set([k for k, v in self.overrides.iteritems() if v ==
|
changed = {k for k, v in self.overrides.iteritems() if v ==
|
||||||
Qt.Unchecked])
|
Qt.Unchecked}
|
||||||
msprefs['ignore_fields'] = list(ignored_fields.union(changed))
|
msprefs['ignore_fields'] = list(ignored_fields.union(changed))
|
||||||
|
|
||||||
def user_default_state(self, field):
|
def user_default_state(self, field):
|
||||||
@ -249,10 +249,10 @@ class FieldsModel(QAbstractListModel): # {{{
|
|||||||
self.endResetModel()
|
self.endResetModel()
|
||||||
|
|
||||||
def commit_user_defaults(self):
|
def commit_user_defaults(self):
|
||||||
default_ignored_fields = set([x for x in msprefs['user_default_ignore_fields'] if x not in
|
default_ignored_fields = {x for x in msprefs['user_default_ignore_fields'] if x not in
|
||||||
self.overrides])
|
self.overrides}
|
||||||
changed = set([k for k, v in self.overrides.iteritems() if v ==
|
changed = {k for k, v in self.overrides.iteritems() if v ==
|
||||||
Qt.Unchecked])
|
Qt.Unchecked}
|
||||||
msprefs['user_default_ignore_fields'] = list(default_ignored_fields.union(changed))
|
msprefs['user_default_ignore_fields'] = list(default_ignored_fields.union(changed))
|
||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
|
@ -292,7 +292,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
|
|||||||
names = self.all_actions.model().names(x)
|
names = self.all_actions.model().names(x)
|
||||||
if names:
|
if names:
|
||||||
not_added = self.current_actions.model().add(names)
|
not_added = self.current_actions.model().add(names)
|
||||||
ns = set([y.name for y in not_added])
|
ns = {y.name for y in not_added}
|
||||||
added = set(names) - ns
|
added = set(names) - ns
|
||||||
self.all_actions.model().remove(x, added)
|
self.all_actions.model().remove(x, added)
|
||||||
if not_added:
|
if not_added:
|
||||||
@ -311,7 +311,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
|
|||||||
names = self.current_actions.model().names(x)
|
names = self.current_actions.model().names(x)
|
||||||
if names:
|
if names:
|
||||||
not_removed = self.current_actions.model().remove(x)
|
not_removed = self.current_actions.model().remove(x)
|
||||||
ns = set([y.name for y in not_removed])
|
ns = {y.name for y in not_removed}
|
||||||
removed = set(names) - ns
|
removed = set(names) - ns
|
||||||
self.all_actions.model().add(removed)
|
self.all_actions.model().add(removed)
|
||||||
if not_removed:
|
if not_removed:
|
||||||
|
@ -230,7 +230,7 @@ class SearchFilter(SearchQueryParser):
|
|||||||
if location not in self.USABLE_LOCATIONS:
|
if location not in self.USABLE_LOCATIONS:
|
||||||
return set([])
|
return set([])
|
||||||
matches = set([])
|
matches = set([])
|
||||||
all_locs = set(self.USABLE_LOCATIONS) - set(['all'])
|
all_locs = set(self.USABLE_LOCATIONS) - {'all'}
|
||||||
locations = all_locs if location == 'all' else [location]
|
locations = all_locs if location == 'all' else [location]
|
||||||
q = {
|
q = {
|
||||||
'affiliate': lambda x: x.affiliate,
|
'affiliate': lambda x: x.affiliate,
|
||||||
|
@ -392,7 +392,7 @@ class SearchFilter(SearchQueryParser):
|
|||||||
if location not in self.USABLE_LOCATIONS:
|
if location not in self.USABLE_LOCATIONS:
|
||||||
return set([])
|
return set([])
|
||||||
matches = set([])
|
matches = set([])
|
||||||
all_locs = set(self.USABLE_LOCATIONS) - set(['all'])
|
all_locs = set(self.USABLE_LOCATIONS) - {'all'}
|
||||||
locations = all_locs if location == 'all' else [location]
|
locations = all_locs if location == 'all' else [location]
|
||||||
q = {
|
q = {
|
||||||
'affiliate': attrgetter('affiliate'),
|
'affiliate': attrgetter('affiliate'),
|
||||||
|
@ -152,7 +152,7 @@ class SearchFilter(SearchQueryParser):
|
|||||||
if location not in self.USABLE_LOCATIONS:
|
if location not in self.USABLE_LOCATIONS:
|
||||||
return set([])
|
return set([])
|
||||||
matches = set([])
|
matches = set([])
|
||||||
all_locs = set(self.USABLE_LOCATIONS) - set(['all'])
|
all_locs = set(self.USABLE_LOCATIONS) - {'all'}
|
||||||
locations = all_locs if location == 'all' else [location]
|
locations = all_locs if location == 'all' else [location]
|
||||||
q = {
|
q = {
|
||||||
'author': lambda x: x.author.lower(),
|
'author': lambda x: x.author.lower(),
|
||||||
|
@ -745,7 +745,7 @@ class TagsModel(QAbstractItemModel): # {{{
|
|||||||
return ans
|
return ans
|
||||||
|
|
||||||
def dropMimeData(self, md, action, row, column, parent):
|
def dropMimeData(self, md, action, row, column, parent):
|
||||||
fmts = set([unicode(x) for x in md.formats()])
|
fmts = {unicode(x) for x in md.formats()}
|
||||||
if not fmts.intersection(set(self.mimeTypes())):
|
if not fmts.intersection(set(self.mimeTypes())):
|
||||||
return False
|
return False
|
||||||
if "application/calibre+from_library" in fmts:
|
if "application/calibre+from_library" in fmts:
|
||||||
@ -880,7 +880,7 @@ class TagsModel(QAbstractItemModel): # {{{
|
|||||||
cat_contents = categories.get(on_node.category_key[1:], None)
|
cat_contents = categories.get(on_node.category_key[1:], None)
|
||||||
if cat_contents is None:
|
if cat_contents is None:
|
||||||
return
|
return
|
||||||
cat_contents = set([(v, c) for v,c,ign in cat_contents])
|
cat_contents = {(v, c) for v,c,ign in cat_contents}
|
||||||
|
|
||||||
fm_src = self.db.metadata_for_field(column)
|
fm_src = self.db.metadata_for_field(column)
|
||||||
label = fm_src['label']
|
label = fm_src['label']
|
||||||
@ -903,7 +903,7 @@ class TagsModel(QAbstractItemModel): # {{{
|
|||||||
if value:
|
if value:
|
||||||
if not isinstance(value, list):
|
if not isinstance(value, list):
|
||||||
value = [value]
|
value = [value]
|
||||||
cat_contents |= set([(v, column) for v in value])
|
cat_contents |= {(v, column) for v in value}
|
||||||
|
|
||||||
categories[on_node.category_key[1:]] = [[v, c, 0] for v,c in cat_contents]
|
categories[on_node.category_key[1:]] = [[v, c, 0] for v,c in cat_contents]
|
||||||
self.db.new_api.set_pref('user_categories', categories)
|
self.db.new_api.set_pref('user_categories', categories)
|
||||||
|
@ -264,7 +264,7 @@ class ResultCache(SearchQueryParser): # {{{
|
|||||||
# Search functions {{{
|
# Search functions {{{
|
||||||
|
|
||||||
def universal_set(self):
|
def universal_set(self):
|
||||||
return set([i[0] for i in self._data if i is not None])
|
return {i[0] for i in self._data if i is not None}
|
||||||
|
|
||||||
def change_search_locations(self, locations):
|
def change_search_locations(self, locations):
|
||||||
self.sqp_change_locations(locations)
|
self.sqp_change_locations(locations)
|
||||||
|
@ -27,7 +27,7 @@ class BIBTEX(CatalogPlugin):
|
|||||||
supported_platforms = ['windows', 'osx', 'linux']
|
supported_platforms = ['windows', 'osx', 'linux']
|
||||||
author = 'Sengian'
|
author = 'Sengian'
|
||||||
version = (1, 0, 0)
|
version = (1, 0, 0)
|
||||||
file_types = set(['bib'])
|
file_types = {'bib'}
|
||||||
|
|
||||||
cli_options = [
|
cli_options = [
|
||||||
Option('--fields',
|
Option('--fields',
|
||||||
|
@ -24,7 +24,7 @@ class CSV_XML(CatalogPlugin):
|
|||||||
supported_platforms = ['windows', 'osx', 'linux']
|
supported_platforms = ['windows', 'osx', 'linux']
|
||||||
author = 'Greg Riker'
|
author = 'Greg Riker'
|
||||||
version = (1, 0, 0)
|
version = (1, 0, 0)
|
||||||
file_types = set(['csv', 'xml'])
|
file_types = {'csv', 'xml'}
|
||||||
|
|
||||||
cli_options = [
|
cli_options = [
|
||||||
Option('--fields',
|
Option('--fields',
|
||||||
|
@ -31,7 +31,7 @@ class EPUB_MOBI(CatalogPlugin):
|
|||||||
minimum_calibre_version = (0, 7, 40)
|
minimum_calibre_version = (0, 7, 40)
|
||||||
author = 'Greg Riker'
|
author = 'Greg Riker'
|
||||||
version = (1, 0, 0)
|
version = (1, 0, 0)
|
||||||
file_types = set(['azw3', 'epub', 'mobi'])
|
file_types = {'azw3', 'epub', 'mobi'}
|
||||||
|
|
||||||
THUMB_SMALLEST = "1.0"
|
THUMB_SMALLEST = "1.0"
|
||||||
THUMB_LARGEST = "2.0"
|
THUMB_LARGEST = "2.0"
|
||||||
|
@ -26,9 +26,9 @@ class CustomColumns(object):
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def custom_tables(self):
|
def custom_tables(self):
|
||||||
return set([x[0] for x in self.conn.get(
|
return {x[0] for x in self.conn.get(
|
||||||
'SELECT name FROM sqlite_master WHERE type="table" AND '
|
'SELECT name FROM sqlite_master WHERE type="table" AND '
|
||||||
'(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')])
|
'(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')}
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
# Verify that CUSTOM_DATA_TYPES is a (possibly improper) subset of
|
# Verify that CUSTOM_DATA_TYPES is a (possibly improper) subset of
|
||||||
@ -361,7 +361,7 @@ class CustomColumns(object):
|
|||||||
ans = self.conn.get('SELECT value FROM %s'%table)
|
ans = self.conn.get('SELECT value FROM %s'%table)
|
||||||
else:
|
else:
|
||||||
ans = self.conn.get('SELECT DISTINCT value FROM %s'%table)
|
ans = self.conn.get('SELECT DISTINCT value FROM %s'%table)
|
||||||
ans = set([x[0] for x in ans])
|
ans = {x[0] for x in ans}
|
||||||
return ans
|
return ans
|
||||||
|
|
||||||
def delete_custom_column(self, label=None, num=None):
|
def delete_custom_column(self, label=None, num=None):
|
||||||
@ -509,7 +509,7 @@ class CustomColumns(object):
|
|||||||
rv = self._set_custom(id, val, label=label, num=num, append=append,
|
rv = self._set_custom(id, val, label=label, num=num, append=append,
|
||||||
notify=notify, extra=extra,
|
notify=notify, extra=extra,
|
||||||
allow_case_change=allow_case_change)
|
allow_case_change=allow_case_change)
|
||||||
self.dirtied(set([id])|rv, commit=False)
|
self.dirtied({id}|rv, commit=False)
|
||||||
if commit:
|
if commit:
|
||||||
self.conn.commit()
|
self.conn.commit()
|
||||||
return rv
|
return rv
|
||||||
@ -590,7 +590,7 @@ class CustomColumns(object):
|
|||||||
if case_change:
|
if case_change:
|
||||||
bks = self.conn.get('SELECT book FROM %s WHERE value=?'%lt,
|
bks = self.conn.get('SELECT book FROM %s WHERE value=?'%lt,
|
||||||
(xid,))
|
(xid,))
|
||||||
books_to_refresh |= set([bk[0] for bk in bks])
|
books_to_refresh |= {bk[0] for bk in bks}
|
||||||
nval = self.conn.get(
|
nval = self.conn.get(
|
||||||
'SELECT custom_%s FROM meta2 WHERE id=?'%data['num'],
|
'SELECT custom_%s FROM meta2 WHERE id=?'%data['num'],
|
||||||
(id_,), all=False)
|
(id_,), all=False)
|
||||||
|
@ -1255,7 +1255,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
formats = self.conn.get('SELECT DISTINCT format from data')
|
formats = self.conn.get('SELECT DISTINCT format from data')
|
||||||
if not formats:
|
if not formats:
|
||||||
return set([])
|
return set([])
|
||||||
return set([f[0] for f in formats])
|
return {f[0] for f in formats}
|
||||||
|
|
||||||
def format_files(self, index, index_is_id=False):
|
def format_files(self, index, index_is_id=False):
|
||||||
id = index if index_is_id else self.id(index)
|
id = index if index_is_id else self.id(index)
|
||||||
@ -2494,7 +2494,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
if case_change:
|
if case_change:
|
||||||
bks = self.conn.get('''SELECT book FROM books_authors_link
|
bks = self.conn.get('''SELECT book FROM books_authors_link
|
||||||
WHERE author=?''', (aid,))
|
WHERE author=?''', (aid,))
|
||||||
books_to_refresh |= set([bk[0] for bk in bks])
|
books_to_refresh |= {bk[0] for bk in bks}
|
||||||
for bk in books_to_refresh:
|
for bk in books_to_refresh:
|
||||||
ss = self.author_sort_from_book(id, index_is_id=True)
|
ss = self.author_sort_from_book(id, index_is_id=True)
|
||||||
aus = self.author_sort(bk, index_is_id=True)
|
aus = self.author_sort(bk, index_is_id=True)
|
||||||
@ -2534,7 +2534,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
self.windows_check_if_files_in_use(id)
|
self.windows_check_if_files_in_use(id)
|
||||||
books_to_refresh = self._set_authors(id, authors,
|
books_to_refresh = self._set_authors(id, authors,
|
||||||
allow_case_change=allow_case_change)
|
allow_case_change=allow_case_change)
|
||||||
self.dirtied(set([id])|books_to_refresh, commit=False)
|
self.dirtied({id}|books_to_refresh, commit=False)
|
||||||
if commit:
|
if commit:
|
||||||
self.conn.commit()
|
self.conn.commit()
|
||||||
self.set_path(id, index_is_id=True)
|
self.set_path(id, index_is_id=True)
|
||||||
@ -2599,7 +2599,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
FROM books_languages_link WHERE
|
FROM books_languages_link WHERE
|
||||||
books_languages_link.lang_code=languages.id) < 1''')
|
books_languages_link.lang_code=languages.id) < 1''')
|
||||||
|
|
||||||
books_to_refresh = set([book_id])
|
books_to_refresh = {book_id}
|
||||||
final_languages = []
|
final_languages = []
|
||||||
for l in languages:
|
for l in languages:
|
||||||
lc = canonicalize_lang(l)
|
lc = canonicalize_lang(l)
|
||||||

@@ -2676,12 +2676,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 if case_change:
 bks = self.conn.get('''SELECT book FROM books_publishers_link
 WHERE publisher=?''', (aid,))
-books_to_refresh |= set([bk[0] for bk in bks])
+books_to_refresh |= {bk[0] for bk in bks}
 self.conn.execute('''DELETE FROM publishers WHERE (SELECT COUNT(id)
 FROM books_publishers_link
 WHERE publisher=publishers.id) < 1''')

-self.dirtied(set([id])|books_to_refresh, commit=False)
+self.dirtied({id}|books_to_refresh, commit=False)
 if commit:
 self.conn.commit()
 self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)

@@ -2990,7 +2990,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 (id,), all=True)
 if not result:
 return set([])
-return set([r[0] for r in result])
+return {r[0] for r in result}

 @classmethod
 def cleanup_tags(cls, tags):

@@ -3123,10 +3123,10 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 if case_changed:
 bks = self.conn.get('SELECT book FROM books_tags_link WHERE tag=?',
 (tid,))
-books_to_refresh |= set([bk[0] for bk in bks])
+books_to_refresh |= {bk[0] for bk in bks}
 self.conn.execute('''DELETE FROM tags WHERE (SELECT COUNT(id)
 FROM books_tags_link WHERE tag=tags.id) < 1''')
-self.dirtied(set([id])|books_to_refresh, commit=False)
+self.dirtied({id}|books_to_refresh, commit=False)
 if commit:
 self.conn.commit()
 tags = u','.join(self.get_tags(id))

@@ -3202,7 +3202,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 if case_change:
 bks = self.conn.get('SELECT book FROM books_series_link WHERE series=?',
 (aid,))
-books_to_refresh |= set([bk[0] for bk in bks])
+books_to_refresh |= {bk[0] for bk in bks}
 self.conn.execute('''DELETE FROM series
 WHERE (SELECT COUNT(id) FROM books_series_link
 WHERE series=series.id) < 1''')

@@ -3598,7 +3598,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 for x in items:
 path_map[x.lower()] = x
 items = set(path_map)
-paths = set([x.lower() for x in paths])
+paths = {x.lower() for x in paths}
 items = items.intersection(paths)
 return items, path_map

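The hunk above lower-cases both sides before intersecting, so the comparison is effectively case-insensitive. A rough sketch of the pattern, with file names invented for illustration:

    candidates = ['Book.EPUB', 'notes.txt']            # hypothetical names on disk
    path_map = {x.lower(): x for x in candidates}      # lower-cased key -> original name
    items = set(path_map)                              # set of the lower-cased keys
    paths = {x.lower() for x in ['book.epub', 'COVER.JPG']}
    assert items.intersection(paths) == {'book.epub'}  # matches regardless of case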

@@ -1221,7 +1221,7 @@ class BuiltinListUnion(BuiltinFormatterFunction):
 def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):
 res = [l.strip() for l in list1.split(separator) if l.strip()]
 l2 = [l.strip() for l in list2.split(separator) if l.strip()]
-lcl1 = set([icu_lower(l) for l in res])
+lcl1 = {icu_lower(l) for l in res}

 for i in l2:
 if icu_lower(i) not in lcl1 and i not in res:

@@ -1242,7 +1242,7 @@ class BuiltinListDifference(BuiltinFormatterFunction):

 def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):
 l1 = [l.strip() for l in list1.split(separator) if l.strip()]
-l2 = set([icu_lower(l.strip()) for l in list2.split(separator) if l.strip()])
+l2 = {icu_lower(l.strip()) for l in list2.split(separator) if l.strip()}

 res = []
 for i in l1:

@@ -1264,7 +1264,7 @@ class BuiltinListIntersection(BuiltinFormatterFunction):

 def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):
 l1 = [l.strip() for l in list1.split(separator) if l.strip()]
-l2 = set([icu_lower(l.strip()) for l in list2.split(separator) if l.strip()])
+l2 = {icu_lower(l.strip()) for l in list2.split(separator) if l.strip()}

 res = []
 for i in l1:

@@ -1303,8 +1303,8 @@ class BuiltinListEquals(BuiltinFormatterFunction):
 'The comparison is case insensitive.')

 def evaluate(self, formatter, kwargs, mi, locals, list1, sep1, list2, sep2, yes_val, no_val):
-s1 = set([icu_lower(l.strip()) for l in list1.split(sep1) if l.strip()])
-s2 = set([icu_lower(l.strip()) for l in list2.split(sep2) if l.strip()])
+s1 = {icu_lower(l.strip()) for l in list1.split(sep1) if l.strip()}
+s2 = {icu_lower(l.strip()) for l in list2.split(sep2) if l.strip()}
 if s1 == s2:
 return yes_val
 return no_val
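list_equals relies on set equality, which ignores order and duplicates; lower-casing both sides first is what makes the comparison case-insensitive. A sketch using str.lower as a stand-in for calibre's icu_lower, with made-up inputs:

    list1, sep1 = 'Fiction, History', ','
    list2, sep2 = 'history;fiction;fiction', ';'
    s1 = {l.strip().lower() for l in list1.split(sep1) if l.strip()}
    s2 = {l.strip().lower() for l in list2.split(sep2) if l.strip()}
    assert s1 == s2   # order and repetition do not matter for set equality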

@@ -302,27 +302,27 @@ class Tester(SearchQueryParser):
 }

 tests = {
-'Dysfunction' : set([348]),
-'title:Dysfunction' : set([348]),
-'Title:Dysfunction' : set([348]),
-'title:Dysfunction OR author:Laurie': set([348, 444]),
-'(tag:txt or tag:pdf)': set([33, 258, 354, 305, 242, 51, 55, 56, 154]),
-'(tag:txt OR tag:pdf) and author:Tolstoy': set([55, 56]),
-'Tolstoy txt': set([55, 56]),
+'Dysfunction' : {348},
+'title:Dysfunction' : {348},
+'Title:Dysfunction' : {348},
+'title:Dysfunction OR author:Laurie': {348, 444},
+'(tag:txt or tag:pdf)': {33, 258, 354, 305, 242, 51, 55, 56, 154},
+'(tag:txt OR tag:pdf) and author:Tolstoy': {55, 56},
+'Tolstoy txt': {55, 56},
 'Hamilton Amsterdam' : set([]),
-u'Beär' : set([91]),
-'dysfunc or tolstoy': set([348, 55, 56]),
-'tag:txt AND NOT tolstoy': set([33, 258, 354, 305, 242, 154]),
-'not tag:lrf' : set([305]),
-'london:thames': set([13]),
-'publisher:london:thames': set([13]),
-'"(1977)"': set([13]),
-'jack weatherford orc': set([30]),
+u'Beär' : {91},
+'dysfunc or tolstoy': {348, 55, 56},
+'tag:txt AND NOT tolstoy': {33, 258, 354, 305, 242, 154},
+'not tag:lrf' : {305},
+'london:thames': {13},
+'publisher:london:thames': {13},
+'"(1977)"': {13},
+'jack weatherford orc': {30},
 'S\\"calzi': {343},
 'author:S\\"calzi': {343},
 '"S\\"calzi"': {343},
 'M\\\\cMurtry': {427},
-'author:Tolstoy (tag:txt OR tag:pdf)': set([55, 56]),
+'author:Tolstoy (tag:txt OR tag:pdf)': {55, 56},
 }
 fields = {'title':0, 'author':1, 'publisher':2, 'tag':3}

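In the test table above the braces nest without ambiguity: the outer braces contain key: value pairs and so form a dict, while the inner braces contain bare values and form sets; only the empty entry keeps set([]), since {} would be an empty dict. A reduced sketch:

    tests = {
        'Dysfunction': {348},          # set literal used as a dict value
        'Hamilton Amsterdam': set(),   # the empty set still needs set()
    }
    assert isinstance(tests, dict)
    assert isinstance(tests['Dysfunction'], set)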

@@ -522,9 +522,9 @@ class _FeedParserMixin:
 }
 _matchnamespaces = {}

-can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
-can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
-can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
+can_be_relative_uri = {'link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'}
+can_contain_relative_uris = {'content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'}
+can_contain_dangerous_markup = {'content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'}
 html_types = [u'text/html', u'application/xhtml+xml']

 def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
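These class attributes presumably exist to answer "x in collection" questions, which is why they were sets in the first place; the literal form changes only the spelling, not the hash-based membership test. A trimmed, illustrative check (element names shortened for brevity):

    can_be_relative_uri = {'link', 'id', 'href'}   # trimmed for illustration
    assert 'href' in can_be_relative_uri           # same O(1) average lookup as before
    assert 'title' not in can_be_relative_uri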

@@ -1859,11 +1859,11 @@ if _XML_AVAILABLE:
 class _BaseHTMLProcessor(sgmllib.SGMLParser):
 special = re.compile('''[<>'"]''')
 bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
-elements_no_end_tag = set([
+elements_no_end_tag = {
 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
 'source', 'track', 'wbr'
-])
+}

 def __init__(self, encoding, _type):
 self.encoding = encoding
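When a set([...]) spans several lines, only the delimiters change: the opening set([ becomes { and the closing ]) becomes }; the element lines in between are untouched, and a trailing comma remains legal. A small sketch with a shortened element list:

    elements_no_end_tag = {
        'area', 'base', 'br',
        'hr', 'img',           # a trailing comma is allowed in a set literal
    }
    assert 'br' in elements_no_end_tag and len(elements_no_end_tag) == 5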

@@ -2080,8 +2080,8 @@ class _MicroformatsParser:
 NODE = 4
 EMAIL = 5

-known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
-known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
+known_xfn_relationships = {'contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'}
+known_binary_extensions = {'zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'}

 def __init__(self, data, baseuri, encoding):
 self.document = BeautifulSoup.BeautifulSoup(data)
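Worth noting: known_binary_extensions lists 'rar' and 'bz2' twice; a set literal silently collapses duplicates exactly as set([...]) did, so the resulting value is unchanged. For example:

    exts = {'zip', 'rar', 'bz2', 'rar', 'bz2'}   # duplicates are dropped at construction
    assert exts == {'zip', 'rar', 'bz2'}
    assert len(exts) == 3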

@@ -2514,7 +2514,7 @@ def _parseMicroformats(htmlSource, baseURI, encoding):
 return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}

 class _RelativeURIResolver(_BaseHTMLProcessor):
-relative_uris = set([('a', 'href'),
+relative_uris = {('a', 'href'),
 ('applet', 'codebase'),
 ('area', 'href'),
 ('blockquote', 'cite'),

@@ -2539,7 +2539,7 @@ class _RelativeURIResolver(_BaseHTMLProcessor):
 ('object', 'usemap'),
 ('q', 'cite'),
 ('script', 'src'),
-('video', 'poster')])
+('video', 'poster')}

 def __init__(self, baseuri, encoding, _type):
 _BaseHTMLProcessor.__init__(self, encoding, _type)
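relative_uris is a set of (tag, attribute) tuples; tuples are hashable, so they work as set-literal elements just as they did inside set([...]), whereas a list such as ['a', 'href'] would raise TypeError: unhashable type. A trimmed check:

    relative_uris = {('a', 'href'), ('img', 'src')}   # trimmed for illustration
    assert ('a', 'href') in relative_uris
    assert ('img', 'lowsrc') not in relative_uris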

@@ -2584,7 +2584,7 @@ def _makeSafeAbsoluteURI(base, rel=None):
 return uri

 class _HTMLSanitizer(_BaseHTMLProcessor):
-acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
+acceptable_elements = {'a', 'abbr', 'acronym', 'address', 'area',
 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',

@@ -2596,9 +2596,9 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
-'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
+'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'}

-acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
+acceptable_attributes = {'abbr', 'accept', 'accept-charset', 'accesskey',
 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
 'background', 'balance', 'bgcolor', 'bgproperties', 'border',
 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',

@@ -2619,11 +2619,11 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
 'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
-'width', 'wrap', 'xml:lang'])
+'width', 'wrap', 'xml:lang'}

-unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
+unacceptable_elements_with_end_tag = {'script', 'applet', 'style'}

-acceptable_css_properties = set(['azimuth', 'background-color',
+acceptable_css_properties = {'azimuth', 'background-color',
 'border-bottom-color', 'border-collapse', 'border-color',
 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',

@@ -2633,26 +2633,26 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
-'white-space', 'width'])
+'white-space', 'width'}

 # survey of common keywords found in feeds
-acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
+acceptable_css_keywords = {'auto', 'aqua', 'black', 'block', 'blue',
 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
-'transparent', 'underline', 'white', 'yellow'])
+'transparent', 'underline', 'white', 'yellow'}

 valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
 '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')

-mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
+mathml_elements = {'annotation', 'annotation-xml', 'maction', 'math',
 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
-'munderover', 'none', 'semantics'])
+'munderover', 'none', 'semantics'}

-mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
+mathml_attributes = {'actiontype', 'align', 'columnalign', 'columnalign',
 'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',

@@ -2660,18 +2660,18 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
 'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
 'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
-'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
+'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'}

 # svgtiny - foreignObject + linearGradient + radialGradient + stop
-svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
+svg_elements = {'a', 'animate', 'animateColor', 'animateMotion',
 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
-'svg', 'switch', 'text', 'title', 'tspan', 'use'])
+'svg', 'switch', 'text', 'title', 'tspan', 'use'}

 # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
-svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
+svg_attributes = {'accent-height', 'accumulate', 'additive', 'alphabetic',
 'arabic-form', 'ascent', 'attributeName', 'attributeType',
 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',

@@ -2697,14 +2697,14 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
-'y2', 'zoomAndPan'])
+'y2', 'zoomAndPan'}

 svg_attr_map = None
 svg_elem_map = None

-acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
+acceptable_svg_properties = { 'fill', 'fill-opacity', 'fill-rule',
 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
-'stroke-opacity'])
+'stroke-opacity'}

 def reset(self):
 _BaseHTMLProcessor.reset(self)
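As an aside not taken by this commit: class-level constant sets like these could also be wrapped in frozenset to make them immutable. The literal form shown in the diff keeps them as ordinary mutable sets, which is behaviour-preserving. A hedged sketch of that alternative, with a shortened value list:

    acceptable_svg_properties = frozenset({'fill', 'stroke', 'stroke-width'})  # hypothetical variant
    assert 'fill' in acceptable_svg_properties
    # acceptable_svg_properties.add('x')  # would raise AttributeError: frozenset has no add()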