Mirror of https://github.com/Kareadita/Kavita.git (synced 2025-06-02 21:24:18 -04:00)
* Started the redesign of the cover image chooser to be less click-intensive for volume/chapter images. Made some headings bold in card detail drawer.
* Tweaked the styles
* Moved where the info cards show
* Added an ability to open a page settings drawer
* Cleaned up some old code that isn't needed anymore.
* Started implementing a list view. Refactored some title code to a dedicated component
* List view implemented but way too many API calls. Either need caching or adjusting the SeriesDetail api.
* Fixed a bug where if the progress bar didn't render on a card item while a download was in progress, the download indicator would be removed.
* Large refactor to move a lot of the needed fields to the chapter and volume DTOs for series detail. All fields are noted when only used in series detail.
* Implemented cards for other tabs (except related)
* Fixed the unit test which needed a mocked reader service call.
* More cleanup around age rating and removing old code from the refactor. Commented out sorting until I feel motivated to work on that.
* Some cleanup and restored cards as initial layout. Time to test this out and see if there is value add.
* Added ability for Chapters tab to show the volume chapters belong to (if applicable)
* Adding style fixes
* Cover image updates: don't allow the first image (which is what is currently set) to respond to cover changes. Hide the ID field on list item for series detail.
* Refactored the title for list item to be injectable
* Cleaned up the selection code to make it less finicky on mobile when tap scrolling.
* Refactored chapter tab to show volume as well on list view.
* Ensure word count shows for volumes
* Started adding virtual scrolling, pushing up so Robbie can mess around
* Started adding virtual scrolling, pushing up so Robbie can mess around
* Fixed a bug where all chapters would come under specials
* Show title data as accent if set.
* Style fixes for virtual scroller
* Restyling scroll
* Implemented a way to show storyline with virtual scrolling
* Show word count for chapters and cleaned up some logic.
* I might have card layout working with virtual scroll code.
* Some cleanup to hide more system-like properties from the info bar on the series detail page. Fixed some missing time estimate info on storyline chapters.
* Fixed a regression on series service when I integrated VolumeTitle.
* Refactored read time to the backend. Added WordCount to the volume itself so we don't need to calculate on the frontend. When asking to analyze files from a series, force the calculation.
* Fixed SeriesDetail api code
* Fixed up the code in the drawer to better update list/card mode
* Basic infinite scroll implemented; however, due to how we are updating the list to render, we are re-rendering cards that haven't been touched.
* Updated how we render and lay out data for infinite scroll on library detail. It's almost there.
* Started laying the foundation for loading pages backwards. Removed lazy loading of images since we are now using virtual paging.
* Hooked in some basic code to allow the user to load a previous page with infinite scroll.
* Fixed up the series detail api and undid the non-lazy loaded images. Changed the router to help with the infinite loading on Firefox issue.
* Fixed up some naming issues with Series Detail and added a new test.
* This is an infinite scroll without pagination implementation. It is not fully done, but off to a good start. Virtual scroller with jump bar is working pretty well, def needs more polishing and tweaking. There are hacks in this implementation that need to be revisited.
* Refactored code so that we don't use any pagination and load all results by default.
* Misc code cleanup from build warnings.
* Cleaned up some logic for how to display titles in list view.
* More title cleanup for specials
* Hooked up page layout to user preferences and renamed an existing user pref name to match the DTO.
* Swapped out everything but storyline with virtual-scroller over CDK
* Removed CDK from series detail.
* Default value for migration on page layout
* Updating card layout for library detail page
* Fixing height for mobile
* Moved scrollbar
* Tweaked some styling for layouts when there is no data
* Refactored the series cards into their own component to make them reusable.
* More tweaks on series info cards layout and enhanced a few pages with trackBy functions.
* Removed some dead code
* Added download on series detail to actionables to fit in with the new scroll strategy.
* Fixed language not being updated and sent to the backend for series update.
* Fixed a bad migration (if you ran any prior migration in this branch, you need to undo it before you use this commit)
* Adding sticky tabs
* Fixed mobile gap on sticky tab
* Enhanced the card title for books to show the number up front.
* Adjusted the gutters on admin dashboard
* Removed debug code
* Removing duplicate book title
* Cleaned up old references to CDK scroller
* Implemented a basic jump bar scaling algorithm. Not perfect, but works pretty well.
* Code smells

Co-authored-by: Robbie Davis <robbie@therobbiedavis.com>
485 lines
22 KiB
C#
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml.Serialization;
using API.Archive;
using API.Data.Metadata;
using API.Extensions;
using API.Services.Tasks;
using Kavita.Common;
using Microsoft.Extensions.Logging;
using SharpCompress.Archives;
using SharpCompress.Common;

namespace API.Services
{
    public interface IArchiveService
    {
        void ExtractArchive(string archivePath, string extractPath);
        int GetNumberOfPagesFromArchive(string archivePath);
        string GetCoverImage(string archivePath, string fileName, string outputDirectory);
        bool IsValidArchive(string archivePath);
        ComicInfo GetComicInfo(string archivePath);
        ArchiveLibrary CanOpen(string archivePath);
        bool ArchiveNeedsFlattening(ZipArchive archive);
        Task<Tuple<byte[], string>> CreateZipForDownload(IEnumerable<string> files, string tempFolder);
    }

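    // Usage sketch (illustrative only, not part of the original file): callers obtain IArchiveService
    // through dependency injection and use it roughly like this:
    //     var pages = archiveService.GetNumberOfPagesFromArchive(chapterFilePath);
    //     var cover = archiveService.GetCoverImage(chapterFilePath, "v0001_c0001", coverDirectory);
    // The variable names above are placeholders; the real call sites live in the cache and scanner services.
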
    /// <summary>
    /// Responsible for manipulating Archive files. Used by <see cref="CacheService"/> and <see cref="ScannerService"/>
    /// </summary>
    // ReSharper disable once ClassWithVirtualMembersNeverInherited.Global
    public class ArchiveService : IArchiveService
    {
        private readonly ILogger<ArchiveService> _logger;
        private readonly IDirectoryService _directoryService;
        private readonly IImageService _imageService;
        private const string ComicInfoFilename = "comicinfo";

        public ArchiveService(ILogger<ArchiveService> logger, IDirectoryService directoryService, IImageService imageService)
        {
            _logger = logger;
            _directoryService = directoryService;
            _imageService = imageService;
        }

        /// <summary>
        /// Checks if a File can be opened. Requires up to 2 opens of the filestream.
        /// </summary>
        /// <param name="archivePath"></param>
        /// <returns></returns>
        public virtual ArchiveLibrary CanOpen(string archivePath)
        {
            if (string.IsNullOrEmpty(archivePath) || !(File.Exists(archivePath) && Parser.Parser.IsArchive(archivePath) || Parser.Parser.IsEpub(archivePath))) return ArchiveLibrary.NotSupported;

            var ext = _directoryService.FileSystem.Path.GetExtension(archivePath).ToUpper();
            if (ext.Equals(".CBR") || ext.Equals(".RAR")) return ArchiveLibrary.SharpCompress;

            try
            {
                using var a2 = ZipFile.OpenRead(archivePath);
                return ArchiveLibrary.Default;
            }
            catch (Exception)
            {
                try
                {
                    using var a1 = ArchiveFactory.Open(archivePath);
                    return ArchiveLibrary.SharpCompress;
                }
                catch (Exception)
                {
                    return ArchiveLibrary.NotSupported;
                }
            }
        }

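        /// <summary>
        /// Counts the image entries in the archive, skipping blacklisted folders (for example __MACOSX). Returns 0 if the archive cannot be read.
        /// </summary>
        /// <param name="archivePath"></param>
        /// <returns></returns>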
        public int GetNumberOfPagesFromArchive(string archivePath)
        {
            if (!IsValidArchive(archivePath))
            {
                _logger.LogError("Archive {ArchivePath} could not be found", archivePath);
                return 0;
            }

            try
            {
                var libraryHandler = CanOpen(archivePath);
                switch (libraryHandler)
                {
                    case ArchiveLibrary.Default:
                    {
                        using var archive = ZipFile.OpenRead(archivePath);
                        return archive.Entries.Count(e => !Parser.Parser.HasBlacklistedFolderInPath(e.FullName) && Parser.Parser.IsImage(e.FullName));
                    }
                    case ArchiveLibrary.SharpCompress:
                    {
                        using var archive = ArchiveFactory.Open(archivePath);
                        return archive.Entries.Count(entry => !entry.IsDirectory &&
                                                              !Parser.Parser.HasBlacklistedFolderInPath(Path.GetDirectoryName(entry.Key) ?? string.Empty)
                                                              && Parser.Parser.IsImage(entry.Key));
                    }
                    case ArchiveLibrary.NotSupported:
                        _logger.LogWarning("[GetNumberOfPagesFromArchive] This archive cannot be read: {ArchivePath}. Defaulting to 0 pages", archivePath);
                        return 0;
                    default:
                        _logger.LogWarning("[GetNumberOfPagesFromArchive] There was an exception when reading archive stream: {ArchivePath}. Defaulting to 0 pages", archivePath);
                        return 0;
                }
            }
            catch (Exception ex)
            {
                _logger.LogWarning(ex, "[GetNumberOfPagesFromArchive] There was an exception when reading archive stream: {ArchivePath}. Defaulting to 0 pages", archivePath);
                return 0;
            }
        }

        /// <summary>
        /// Finds the first entry that is recognized as a cover image (e.g. cover.jpg) and returns it
        /// </summary>
        /// <param name="entryFullNames"></param>
        /// <returns>Entry name of match, null if no match</returns>
        public static string FindFolderEntry(IEnumerable<string> entryFullNames)
        {
            var result = entryFullNames
                .Where(path => !(Path.EndsInDirectorySeparator(path) || Parser.Parser.HasBlacklistedFolderInPath(path) || path.StartsWith(Parser.Parser.MacOsMetadataFileStartsWith)))
                .OrderByNatural(Path.GetFileNameWithoutExtension)
                .FirstOrDefault(Parser.Parser.IsCoverImage);

            return string.IsNullOrEmpty(result) ? null : result;
        }

        /// <summary>
        /// Returns first entry that is an image and is not in a blacklisted folder path. Uses <see cref="OrderByNatural"/> for ordering files
        /// </summary>
        /// <param name="entryFullNames"></param>
        /// <returns>Entry name of match, null if no match</returns>
        public static string? FirstFileEntry(IEnumerable<string> entryFullNames, string archiveName)
        {
            // First check if there are any files that are not in a nested folder before just comparing by filename. This is needed
            // because NaturalSortComparer does not work with paths and doesn't treat 001.jpg as coming before chapter 1/001.jpg.
            var fullNames = entryFullNames
                .Where(path => !(Path.EndsInDirectorySeparator(path) || Parser.Parser.HasBlacklistedFolderInPath(path) || path.StartsWith(Parser.Parser.MacOsMetadataFileStartsWith)) && Parser.Parser.IsImage(path))
                .OrderByNatural(c => c.GetFullPathWithoutExtension())
                .ToList();
            if (fullNames.Count == 0) return null;

            var nonNestedFile = fullNames.Where(entry => (Path.GetDirectoryName(entry) ?? string.Empty).Equals(archiveName))
                .OrderByNatural(c => c.GetFullPathWithoutExtension())
                .FirstOrDefault();

            if (!string.IsNullOrEmpty(nonNestedFile)) return nonNestedFile;

            // Check the first folder and sort within that to see if we can find a file, else fall back to the first file with basic sort.
            // Get first folder, then sort within that
            var firstDirectoryFile = fullNames.OrderByNatural(Path.GetDirectoryName).FirstOrDefault();
            if (!string.IsNullOrEmpty(firstDirectoryFile))
            {
                var firstDirectory = Path.GetDirectoryName(firstDirectoryFile);
                if (!string.IsNullOrEmpty(firstDirectory))
                {
                    var firstDirectoryResult = fullNames.Where(f => firstDirectory.Equals(Path.GetDirectoryName(f)))
                        .OrderByNatural(Path.GetFileNameWithoutExtension)
                        .FirstOrDefault();

                    if (!string.IsNullOrEmpty(firstDirectoryResult)) return firstDirectoryResult;
                }
            }

            var result = fullNames
                .OrderByNatural(Path.GetFileNameWithoutExtension)
                .FirstOrDefault();

            return string.IsNullOrEmpty(result) ? null : result;
        }

        /// <summary>
        /// Generates a cover image thumbnail from the archive and writes it to the output directory.
        /// Given a path to a compressed file <see cref="Parser.Parser.ArchiveFileExtensions"/>, will ensure the first image (respects directory structure) is used unless
        /// a folder/cover.(image extension) exists in the compressed file (if duplicate, the first is chosen)
        ///
        /// This skips over any __MACOSX folder/file iteration.
        /// </summary>
        /// <remarks>This always creates a thumbnail</remarks>
        /// <param name="archivePath"></param>
        /// <param name="fileName">File name to use based on context of entity.</param>
        /// <param name="outputDirectory">Directory to write the thumbnail to</param>
        /// <returns></returns>
        public string GetCoverImage(string archivePath, string fileName, string outputDirectory)
        {
            if (archivePath == null || !IsValidArchive(archivePath)) return string.Empty;
            try
            {
                var libraryHandler = CanOpen(archivePath);
                switch (libraryHandler)
                {
                    case ArchiveLibrary.Default:
                    {
                        using var archive = ZipFile.OpenRead(archivePath);

                        var entryName = FindCoverImageFilename(archivePath, archive.Entries.Select(e => e.FullName));
                        var entry = archive.Entries.Single(e => e.FullName == entryName);

                        using var stream = entry.Open();
                        return _imageService.WriteCoverThumbnail(stream, fileName, outputDirectory);
                    }
                    case ArchiveLibrary.SharpCompress:
                    {
                        using var archive = ArchiveFactory.Open(archivePath);
                        var entryNames = archive.Entries.Where(archiveEntry => !archiveEntry.IsDirectory).Select(e => e.Key).ToList();

                        var entryName = FindCoverImageFilename(archivePath, entryNames);
                        var entry = archive.Entries.Single(e => e.Key == entryName);

                        using var stream = entry.OpenEntryStream();
                        return _imageService.WriteCoverThumbnail(stream, fileName, outputDirectory);
                    }
                    case ArchiveLibrary.NotSupported:
                        _logger.LogWarning("[GetCoverImage] This archive cannot be read: {ArchivePath}. Defaulting to no cover image", archivePath);
                        return string.Empty;
                    default:
                        _logger.LogWarning("[GetCoverImage] There was an exception when reading archive stream: {ArchivePath}. Defaulting to no cover image", archivePath);
                        return string.Empty;
                }
            }
            catch (Exception ex)
            {
                _logger.LogWarning(ex, "[GetCoverImage] There was an exception when reading archive stream: {ArchivePath}. Defaulting to no cover image", archivePath);
            }

            return string.Empty;
        }

        /// <summary>
        /// Given a list of image paths (assume within an archive), find the filename that corresponds to the cover
        /// </summary>
        /// <param name="archivePath"></param>
        /// <param name="entryNames"></param>
        /// <returns></returns>
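        /// <remarks>
        /// Illustrative example (assuming <see cref="Parser.Parser.IsCoverImage"/> matches "cover.jpg"): for the entries
        /// ["Chapter 1/001.jpg", "cover.jpg"], "cover.jpg" is chosen via <see cref="FindFolderEntry"/>; if no cover-named
        /// entry exists, the naturally-first image is returned via <see cref="FirstFileEntry"/>.
        /// </remarks>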
        public static string FindCoverImageFilename(string archivePath, IEnumerable<string> entryNames)
        {
            var entryName = FindFolderEntry(entryNames) ?? FirstFileEntry(entryNames, Path.GetFileName(archivePath));
            return entryName;
        }

        /// <summary>
        /// Given an archive stream, will assess whether directory needs to be flattened so that the extracted archive files are directly
        /// under extract path and not nested in subfolders. See <see cref="DirectoryService"/> Flatten method.
        /// </summary>
        /// <param name="archive">An opened archive stream</param>
        /// <returns></returns>
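        /// <remarks>
        /// For example, an archive whose entries all look like "Vol 1/001.jpg" needs flattening so pages end up directly
        /// under the extract path; an archive with entries like "001.jpg" does not.
        /// </remarks>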
        public bool ArchiveNeedsFlattening(ZipArchive archive)
        {
            // Sometimes ZipArchive will list the directory and others it will just keep it in the FullName
            return archive.Entries.Count > 0 &&
                   !Path.HasExtension(archive.Entries.ElementAt(0).FullName) ||
                   archive.Entries.Any(e => e.FullName.Contains(Path.AltDirectorySeparatorChar) && !Parser.Parser.HasBlacklistedFolderInPath(e.FullName));
        }

        // TODO: Refactor CreateZipForDownload to return the temp file so we can stream it from temp
        /// <summary>
        /// Copies the given files to a temp folder, zips them up, and returns the zip contents as a byte array along with the zip path.
        /// </summary>
        /// <param name="files"></param>
        /// <param name="tempFolder">Temp folder name to use for preparing the files. Will be created and deleted</param>
        /// <returns></returns>
        /// <exception cref="KavitaException"></exception>
        public async Task<Tuple<byte[], string>> CreateZipForDownload(IEnumerable<string> files, string tempFolder)
        {
            var dateString = DateTime.Now.ToShortDateString().Replace("/", "_");

            var tempLocation = Path.Join(_directoryService.TempDirectory, $"{tempFolder}_{dateString}");
            _directoryService.ExistOrCreate(tempLocation);
            if (!_directoryService.CopyFilesToDirectory(files, tempLocation))
            {
                throw new KavitaException("Unable to copy files to temp directory archive download.");
            }

            var zipPath = Path.Join(_directoryService.TempDirectory, $"kavita_{tempFolder}_{dateString}.zip");
            try
            {
                ZipFile.CreateFromDirectory(tempLocation, zipPath);
            }
            catch (AggregateException ex)
            {
                _logger.LogError(ex, "There was an issue creating temp archive");
                throw new KavitaException("There was an issue creating temp archive");
            }

            var fileBytes = await _directoryService.ReadFileAsync(zipPath);

            _directoryService.ClearAndDeleteDirectory(tempLocation); // NOTE: For sending back just the zip, schedule this to be called after the file is returned or let the next temp storage cleanup take care of it
            (new FileInfo(zipPath)).Delete();

            return Tuple.Create(fileBytes, zipPath);
        }

        /// <summary>
        /// Tests if the archive path exists and is a supported archive (or epub)
        /// </summary>
        /// <param name="archivePath"></param>
        /// <returns></returns>
        public bool IsValidArchive(string archivePath)
        {
            if (!File.Exists(archivePath))
            {
                _logger.LogWarning("Archive {ArchivePath} could not be found", archivePath);
                return false;
            }

            if (Parser.Parser.IsArchive(archivePath) || Parser.Parser.IsEpub(archivePath)) return true;

            _logger.LogWarning("Archive {ArchivePath} is not a valid archive", archivePath);
            return false;
        }

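        /// <summary>
        /// Returns true when the entry is an XML file named comicinfo (case-insensitive) that is not inside a
        /// blacklisted folder and is not a macOS metadata file.
        /// </summary>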
        private static bool ValidComicInfoArchiveEntry(string fullName, string name)
        {
            var filenameWithoutExtension = Path.GetFileNameWithoutExtension(name).ToLower();
            return !Parser.Parser.HasBlacklistedFolderInPath(fullName)
                   && filenameWithoutExtension.Equals(ComicInfoFilename, StringComparison.InvariantCultureIgnoreCase)
                   && !filenameWithoutExtension.StartsWith(Parser.Parser.MacOsMetadataFileStartsWith)
                   && Parser.Parser.IsXml(name);
        }

        /// <summary>
        /// Attempts to parse a ComicInfo.xml entry from the archive. Returns null if nothing is found or any errors occur during access.
        /// </summary>
        /// <param name="archivePath"></param>
        /// <returns></returns>
        public ComicInfo? GetComicInfo(string archivePath)
        {
            if (!IsValidArchive(archivePath)) return null;

            try
            {
                if (!File.Exists(archivePath)) return null;

                var libraryHandler = CanOpen(archivePath);
                switch (libraryHandler)
                {
                    case ArchiveLibrary.Default:
                    {
                        using var archive = ZipFile.OpenRead(archivePath);

                        var entry = archive.Entries.FirstOrDefault(x => ValidComicInfoArchiveEntry(x.FullName, x.Name));
                        if (entry != null)
                        {
                            using var stream = entry.Open();
                            var serializer = new XmlSerializer(typeof(ComicInfo));
                            var info = (ComicInfo) serializer.Deserialize(stream);
                            ComicInfo.CleanComicInfo(info);
                            return info;
                        }

                        break;
                    }
                    case ArchiveLibrary.SharpCompress:
                    {
                        using var archive = ArchiveFactory.Open(archivePath);
                        var entry = archive.Entries.FirstOrDefault(e =>
                            ValidComicInfoArchiveEntry(Path.GetDirectoryName(e.Key), e.Key));

                        if (entry != null)
                        {
                            using var stream = entry.OpenEntryStream();
                            var serializer = new XmlSerializer(typeof(ComicInfo));
                            var info = (ComicInfo) serializer.Deserialize(stream);
                            ComicInfo.CleanComicInfo(info);
                            return info;
                        }

                        break;
                    }
                    case ArchiveLibrary.NotSupported:
                        _logger.LogWarning("[GetComicInfo] This archive cannot be read: {ArchivePath}", archivePath);
                        return null;
                    default:
                        _logger.LogWarning(
                            "[GetComicInfo] There was an exception when reading archive stream: {ArchivePath}",
                            archivePath);
                        return null;
                }
            }
            catch (Exception ex)
            {
                _logger.LogWarning(ex, "[GetComicInfo] There was an exception when reading archive stream: {Filepath}", archivePath);
            }

            return null;
        }

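        /// <summary>
        /// Writes the given archive entries to the extract path, preserving their relative folder structure.
        /// </summary>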
        private void ExtractArchiveEntities(IEnumerable<IArchiveEntry> entries, string extractPath)
        {
            _directoryService.ExistOrCreate(extractPath);
            foreach (var entry in entries)
            {
                entry.WriteToDirectory(extractPath, new ExtractionOptions()
                {
                    ExtractFullPath = true, // Don't flatten, let the flattener ensure correct ordering of nested folders
                    Overwrite = false
                });
            }
        }

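        /// <summary>
        /// Extracts a <see cref="ZipArchive"/> to the extract path, flattening the result when the pages are nested in
        /// a subfolder so they end up directly under the extract path.
        /// </summary>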
        private void ExtractArchiveEntries(ZipArchive archive, string extractPath)
        {
            // TODO: In cases where we try to extract, but there are InvalidPathChars, we need to inform the user (throw exception, let middleware inform user)
            var needsFlattening = ArchiveNeedsFlattening(archive);
            if (!archive.HasFiles() && !needsFlattening) return;

            archive.ExtractToDirectory(extractPath, true);
            if (!needsFlattening) return;

            _logger.LogDebug("Extracted archive is nested in root folder, flattening...");
            _directoryService.Flatten(extractPath);
        }

        /// <summary>
        /// Extracts an archive to the extract path (a temp cache directory). If the extract path already exists, returns
        /// without performing an extraction. Does nothing if there are any invalidations which would prevent operations
        /// from performing correctly (missing archivePath file, unsupported or empty archive, etc).
        /// </summary>
        /// <param name="archivePath">A valid path to an archive file.</param>
        /// <param name="extractPath">Path to extract to</param>
        /// <returns></returns>
        public void ExtractArchive(string archivePath, string extractPath)
        {
            if (!IsValidArchive(archivePath)) return;

            if (Directory.Exists(extractPath)) return;

            if (!_directoryService.FileSystem.File.Exists(archivePath))
            {
                _logger.LogError("{Archive} does not exist on disk", archivePath);
                throw new KavitaException($"{archivePath} does not exist on disk");
            }

            var sw = Stopwatch.StartNew();

            try
            {
                var libraryHandler = CanOpen(archivePath);
                switch (libraryHandler)
                {
                    case ArchiveLibrary.Default:
                    {
                        using var archive = ZipFile.OpenRead(archivePath);
                        ExtractArchiveEntries(archive, extractPath);
                        break;
                    }
                    case ArchiveLibrary.SharpCompress:
                    {
                        using var archive = ArchiveFactory.Open(archivePath);
                        ExtractArchiveEntities(archive.Entries.Where(entry => !entry.IsDirectory
                                                                              && !Parser.Parser.HasBlacklistedFolderInPath(Path.GetDirectoryName(entry.Key) ?? string.Empty)
                                                                              && Parser.Parser.IsImage(entry.Key)), extractPath);
                        break;
                    }
                    case ArchiveLibrary.NotSupported:
                        _logger.LogWarning("[ExtractArchive] This archive cannot be read: {ArchivePath}", archivePath);
                        return;
                    default:
                        _logger.LogWarning("[ExtractArchive] There was an exception when reading archive stream: {ArchivePath}", archivePath);
                        return;
                }
            }
            catch (Exception e)
            {
                _logger.LogWarning(e, "[ExtractArchive] There was a problem extracting {ArchivePath} to {ExtractPath}", archivePath, extractPath);
                return;
            }
            _logger.LogDebug("Extracted archive to {ExtractPath} in {ElapsedMilliseconds} milliseconds", extractPath, sw.ElapsedMilliseconds);
        }
    }
}