Mirror of https://github.com/Kareadita/Kavita.git (synced 2025-05-31 04:04:19 -04:00)
* Added book filetype detection and reorganized tests due to the size of the file.
* Added the ability to get basic Parse Info from Book and Pages.
* We can now scan books and get them into a library with cover images.
* Take the first image in the epub if the cover isn't set.
* Implemented the ability to unzip the epub to cache. Implemented a test api to load html files.
* Just some test code to figure out how to approach this.
* Fixed some merge conflicts.
* Removed some dead code from the merge.
* Snapshot: I can now load everything properly into the UI by rewriting the urls before I send them back. I don't notice any lag from this method. It can be optimized further.
* Implemented a way to load the content in the browser without an iframe.
* Added a note.
* Anchor mapping is complete. New anchors are updated so references now resolve to javascript:void() for the UI to handle the page load internally, and the appropriate page is mapped to each anchor. External anchors get target="_blank" so they don't force you out of the app, and styles are of course inlined. (A rough sketch of this approach follows this list.)
* Oops, I need this.
* Table of contents api implemented (rough) and some small enhancements to the codebase for books.
* GetBookPageResources now only loads files from within the book. Nested chapter list support; images now use html parsing instead of string parsing.
* Fonts are now remapped to load from an endpoint.
* book-resources now uses a key, ensuring the file is in the proper format for lookup. Changed the chapter list to be based on a structure with one HEADER and nested chapters.
* Properly handle svg resource requests and, when there are clickable part anchors, handle them in the UI by adding a kavita-page handler.
* Add a Chapter group page even if one isn't set, by using the first page (without a part) from nestedChildren.
* Added extra debug code for issue #163.
* Added new user preferences for books and updated the css so it is scoped to our reading section.
* Cleaned up style code.
* Implemented the ability to save book preferences and some cleanup of existing apis.
* Added an api for checking if a user has read something in a library type before.
* Forgot to make sure the has-reading-progress check is against a user.
* Removed cache service code for books, since we use an in-memory method.
* Handle svg images as well.
* Enhanced cover image extraction to check for a "cover" image if the cover image wasn't set in the OPF, before falling back to the first image.
* Fixed an issue with special books not properly generating metadata due to not having the filename set.
* Cleanup: removed the warmup task code from startup/Program and changed the task scheduler to schedule tasks on startup only (or when tasks are changed from the UI).
* Code cleanup.
* Code cleanup.
* So much code. Lots of refactors to try to test ScannerService. Moved a lot of the queries into Extensions to allow easier testing, even though it's hacky. Support @font-face src:url swaps with ' and ". Source summary information from epubs.
* Well... baseURL needs to come from the BE and not from the UI.
* Adjusted migrations so default values match the Entity.
* Removed a comment.
* I think I finally fixed #163! The issue was that when I checked whether it had a ParserInfo, I wasn't considering that the chapter range might have a - in it (0-6), so when the check that the range could be parsed to a number failed, it was treated like a special and the range was compared against the info's filename. (See the second sketch below.)
* Some bugfixes.
* Lots of testing, extracting code to make it easier to test. This code is buggy, but it fixed a bug where 1) if we changed the normalization code, we would remove the whole db during a scan, and 2) we weren't actually removing series properly. Other than that, code is being extracted to remove duplication and centralize logic.
* More code cleanup and test cleanup to ensure the scan loop is working as expected and matches expectations from the tests.
* Cleaned up the code and made it so that if I change normalization, which I do in this branch, it won't break existing DBs.
* Some comic parser changes for partial chapter support.
* Added some code for DirectoryService and ScannerService, along with python code to generate test files (not used yet). Fixed up all the tests.
* Code smells.
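For clarity, here is a minimal sketch of the anchor handling described above, assuming HtmlAgilityPack for the HTML parsing. The helper name, the kavita-page attribute shape, and the page-mapping dictionary are illustrative assumptions, not the actual BookService implementation.

// Illustrative sketch only (not Kavita's actual code): rewrite anchors in a book page
// so external links open in a new tab and internal links are handed to the UI via a
// kavita-page attribute instead of navigating the browser.
using System.Collections.Generic;
using HtmlAgilityPack;

public static class AnchorRewriteSketch
{
    public static string RewriteAnchors(string pageHtml, IReadOnlyDictionary<string, int> pageMappings)
    {
        var doc = new HtmlDocument();
        doc.LoadHtml(pageHtml);

        var anchors = doc.DocumentNode.SelectNodes("//a[@href]");
        if (anchors == null) return pageHtml;

        foreach (var anchor in anchors)
        {
            var href = anchor.GetAttributeValue("href", string.Empty);

            if (href.StartsWith("http://") || href.StartsWith("https://"))
            {
                // External link: open outside the app so the reader isn't pulled out of the page
                anchor.SetAttributeValue("target", "_blank");
                continue;
            }

            // Internal link: neutralize the href and record which page the UI should load
            var key = href.Split('#')[0];
            if (pageMappings.TryGetValue(key, out var page))
            {
                anchor.SetAttributeValue("kavita-page", page.ToString());
                anchor.SetAttributeValue("href", "javascript:void(0)");
            }
        }

        return doc.DocumentNode.OuterHtml;
    }
}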
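And a second minimal sketch of the range check behind the #163 fix described above: a chapter range such as "0-6" is not itself a parseable number, so the check has to look at the part before the dash before falling back to special handling. The method name and shape are assumptions for illustration only.

// Illustrative sketch only: a chapter Range like "0-6" fails float.TryParse on the whole
// string, which previously sent the file down the "special" path. Checking the segment
// before the '-' avoids that misclassification.
public static class RangeCheckSketch
{
    public static bool RangeHasNumber(string range)
    {
        if (string.IsNullOrEmpty(range)) return false;

        // "0-6" -> "0", "5" -> "5"
        var firstPart = range.Split('-')[0];
        return float.TryParse(firstPart, out _);
    }
}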
104 lines
4.1 KiB
C#
using System.Collections.Generic;
using System.IO;
using System.Linq;
using API.Services;
using API.Tests.Helpers;
using Microsoft.Extensions.Logging;
using NSubstitute;
using Xunit;

namespace API.Tests.Services
{
    // Unit tests for DirectoryService's file enumeration and path helpers.
    public class DirectoryServiceTests
    {
        private readonly DirectoryService _directoryService;

        // The logger is substituted (NSubstitute) so tests don't depend on a real logging provider.
        private readonly ILogger<DirectoryService> _logger = Substitute.For<ILogger<DirectoryService>>();

        public DirectoryServiceTests()
        {
            _directoryService = new DirectoryService(_logger);
        }
        // Counts archive files under the ScannerService Manga test data via the parallel
        // tree traversal. Note: the 'file' parameter from InlineData is not used by the
        // test body; only the expected count is asserted.
        [Theory]
        [InlineData("Manga-testcase.txt", 28)]
        public void GetFilesTest(string file, int expectedFileCount)
        {
            var testDirectory = Path.Join(Directory.GetCurrentDirectory(), "../../../Services/Test Data/ScannerService/Manga");
            var files = new List<string>();
            var fileCount = DirectoryService.TraverseTreeParallelForEach(testDirectory, s => files.Add(s),
                API.Parser.Parser.ArchiveFileExtensions, _logger);

            Assert.Equal(expectedFileCount, fileCount);
        }
        // A custom regex filter should match exactly two files in the regex test data folder.
        [Fact]
        public void GetFiles_WithCustomRegex_ShouldPass_Test()
        {
            var testDirectory = Path.Join(Directory.GetCurrentDirectory(), "../../../Services/Test Data/DirectoryService/regex");
            var files = _directoryService.GetFiles(testDirectory, @"file\d*.txt");
            Assert.Equal(2, files.Count());
        }
        // With no filter, the top-level DirectoryService test data folder holds no files
        // directly (only subfolders), so the result should be empty.
        [Fact]
        public void GetFiles_TopLevel_ShouldBeEmpty_Test()
        {
            var testDirectory = Path.Join(Directory.GetCurrentDirectory(), "../../../Services/Test Data/DirectoryService");
            var files = _directoryService.GetFiles(testDirectory);
            Assert.Empty(files);
        }
        // Filtering the 'extensions' test folder for *.txt should return nothing.
        [Fact]
        public void GetFilesWithExtensions_ShouldBeEmpty_Test()
        {
            var testDirectory = Path.Join(Directory.GetCurrentDirectory(), "../../../Services/Test Data/DirectoryService/extensions");
            var files = _directoryService.GetFiles(testDirectory, "*.txt");
            Assert.Empty(files);
        }
        // The 'extension' test folder contains three files matching the ".cbz|.rar" filter.
        [Fact]
        public void GetFilesWithExtensions_Test()
        {
            var testDirectory = Path.Join(Directory.GetCurrentDirectory(), "../../../Services/Test Data/DirectoryService/extension");
            var files = _directoryService.GetFiles(testDirectory, ".cbz|.rar");
            Assert.Equal(3, files.Count());
        }
        // A directory that does not exist should yield an empty result.
        [Fact]
        public void GetFilesWithExtensions_BadDirectory_ShouldBeEmpty_Test()
        {
            var testDirectory = Path.Join(Directory.GetCurrentDirectory(), "../../../Services/Test Data/DirectoryService/doesntexist");
            var files = _directoryService.GetFiles(testDirectory, ".cbz|.rar");
            Assert.Empty(files);
        }
        // Listing a directory with subfolders should include the 'regex' subfolder.
        [Fact]
        public void ListDirectory_SubDirectory_Test()
        {
            var testDirectory = Path.Join(Directory.GetCurrentDirectory(), "../../../Services/Test Data/DirectoryService/");
            var dirs = _directoryService.ListDirectory(testDirectory);
            Assert.Contains(dirs, s => s.Contains("regex"));
        }
        // An empty path has no subdirectories to list, so 'regex' should not appear.
        [Fact]
        public void ListDirectory_NoSubDirectory_Test()
        {
            var dirs = _directoryService.ListDirectory("");
            Assert.DoesNotContain(dirs, s => s.Contains("regex"));
        }
        // GetFoldersTillRoot should return the folder names between the library root and
        // the file's folder, deepest first, regardless of trailing or Windows-style separators.
        [Theory]
        [InlineData("C:/Manga/", "C:/Manga/Love Hina/Specials/Omake/", "Omake,Specials,Love Hina")]
        [InlineData("C:/Manga/", "C:/Manga/Love Hina/Specials/Omake", "Omake,Specials,Love Hina")]
        [InlineData("C:/Manga", "C:/Manga/Love Hina/Specials/Omake/", "Omake,Specials,Love Hina")]
        [InlineData("C:/Manga", @"C:\Manga\Love Hina\Specials\Omake\", "Omake,Specials,Love Hina")]
        [InlineData(@"/manga/", @"/manga/Love Hina/Specials/Omake/", "Omake,Specials,Love Hina")]
        public void GetFoldersTillRoot_Test(string rootPath, string fullpath, string expectedArray)
        {
            var expected = expectedArray.Split(",");
            Assert.Equal(expected, DirectoryService.GetFoldersTillRoot(rootPath, fullpath));
        }
    }
}