Kavita/API/Services/CacheService.cs
Joe Milazzo 2464a30bc2
Manga Reader Work (#1729)
* Instead of augmenting the prefetcher to move across chapter bounds, let's just load 5 images (which the browser will cache) from the next/prev chapters, so that when one of them loads it's much faster.

* Trialing loading 5 pages from the next/prev chapters for a better next-page loading experience.

* Tweaked the GetChapterInfo API to actually apply the conditional includeDimensions parameter (sketched below).
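
A rough, simplified sketch of what honoring that flag might look like (the controller name, route, and return shape here are placeholders and this is not Kavita's actual GetChapterInfo code; only ICacheService and FileDimensionDto come from the service below):

```csharp
// Hypothetical sketch only; not Kavita's real controller code.
using System.Collections.Generic;
using System.Threading.Tasks;
using API.DTOs.Reader;
using API.Services;
using Microsoft.AspNetCore.Mvc;

[ApiController]
[Route("api/[controller]")]
public class ChapterInfoSketchController : ControllerBase
{
    private readonly ICacheService _cacheService;

    public ChapterInfoSketchController(ICacheService cacheService)
    {
        _cacheService = cacheService;
    }

    [HttpGet("chapter-info")]
    public async Task<ActionResult<IEnumerable<FileDimensionDto>>> GetChapterInfo(int chapterId, bool includeDimensions = false)
    {
        // Make sure the chapter is extracted to the cache before touching its pages
        var chapter = await _cacheService.Ensure(chapterId);
        if (chapter == null) return BadRequest("Invalid chapter");

        // The point of the bullet above: the (relatively expensive) dimension probe
        // only runs when the caller explicitly opts in via includeDimensions.
        return Ok(includeDimensions
            ? _cacheService.GetCachedFileDimensions(chapterId)
            : new List<FileDimensionDto>());
    }
}
```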

* Added a basic language file for upcoming work

* Moved the bottom menu up a bit for iOS devices with handlebars.

* Fixed fit-to-width on phones still showing a horizontal scrollbar

* Fixed a bug where there was extra space under the image when fit to width on a phone, due to pagination going too far.

* Changed which variable we use for right pagination calculation

* Fixing fit to height

- Fixing height calc to account for horizontal scroll bar height.

* Added a comment for the height scrollbar fix

* Adding screenfull package

# Added:
- Added screenfull package to handle cross-platform browser fullscreen code

# Removed:
- Removed custom fullscreen code

* Fixed a bug where switching from webtoon reader to other layout modes wouldn't render anything. Webtoon continuous scroll down is now broken.

* Fixed it back to how it was and all is good. Need to call detectChanges explicitly.

* Removed an additional unneeded save-progress call on loadPage

* Laid out the test case for moving the page snapping to the backend, with full unit tests. The current code is broken, just like the UI layer.

* Refactored the snap points into the backend and ensured that they work correctly (see the sketch below).
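
The snapping code itself isn't part of this file, so purely as an illustration of the pattern (a pure, unit-testable backend calculation fed by the FileDimensionDto data below), a hypothetical helper might look like the following. The class name, signature, and pairing rule are assumptions, not Kavita's actual implementation:

```csharp
// Illustrative only; Kavita's real snap-point logic may differ.
using System.Collections.Generic;
using API.DTOs.Reader;

public static class PageSnapSketch
{
    /// <summary>
    /// Maps every page to the page the renderer should "snap" to,
    /// keeping wide (landscape) images on their own spread.
    /// </summary>
    public static IDictionary<int, int> GetSnapPoints(IReadOnlyList<FileDimensionDto> dimensions)
    {
        var result = new Dictionary<int, int>();
        var pairStart = 0;
        for (var i = 0; i < dimensions.Count; i++)
        {
            if (dimensions[i].IsWide)
            {
                // A wide image snaps to itself and resets the pairing
                result[i] = i;
                pairStart = i + 1;
            }
            else
            {
                // Regular pages snap back to the first page of their pair
                result[i] = pairStart + ((i - pairStart) / 2) * 2;
            }
        }
        return result;
    }
}
```

Because the calculation is a plain function of the page dimensions, the unit tests mentioned above can feed in synthetic dimension lists and assert the expected mapping without any UI involved.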

* Fixed a broken unit test

* Filter out spammy hubs/messages calls in the logs

* Swallow all noisy messages from RequestLoggingMiddleware when the log level is Information or above (see the sketch below).
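
The middleware change isn't included in this view; assuming a Serilog-based pipeline (RequestLoggingMiddleware is Serilog.AspNetCore's request logger), the filtering could be sketched roughly like this, with the path fragment and helper name being illustrative:

```csharp
// Illustrative only; the exact filter Kavita ships may differ.
using Serilog;
using Serilog.Events;

public static class RequestLogFilterSketch
{
    public static LoggerConfiguration SwallowNoisyRequestLogs(this LoggerConfiguration config, LogEventLevel configuredMinimum)
    {
        // At Debug/Trace, keep the request chatter around for troubleshooting
        if (configuredMinimum < LogEventLevel.Information) return config;

        // Otherwise drop request-completion events for the chatty SignalR/message endpoints
        return config.Filter.ByExcluding(evt =>
            evt.Properties.TryGetValue("SourceContext", out var source)
            && source.ToString().Contains("RequestLoggingMiddleware")
            && evt.Properties.TryGetValue("RequestPath", out var path)
            && path.ToString().Contains("/hubs/"));
    }
}
```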

* Added a common loading component to the app. Have yet to refactor all screens to use this.

* Bump json5 from 2.2.0 to 2.2.3 in /UI/Web

Bumps [json5](https://github.com/json5/json5) from 2.2.0 to 2.2.3.
- [Release notes](https://github.com/json5/json5/releases)
- [Changelog](https://github.com/json5/json5/blob/main/CHANGELOG.md)
- [Commits](https://github.com/json5/json5/compare/v2.2.0...v2.2.3)

---
updated-dependencies:
- dependency-name: json5
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

* Aligned all the loading messages and styles throughout the app

* Webtoon reader will use max width of all images to ensure images align well.

* On Original scaling mode, users can use the keyboard to scroll around the images without pagination kicking off.

* Removed console logs

* Fixed a public vs private issue

* Fixed an issue around some cached files getting locked due to NetVips holding them during file size calculations.

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Robbie Davis <robbie@therobbiedavis.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-01-07 09:14:22 -06:00


using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using API.Comparators;
using API.Data;
using API.DTOs.Reader;
using API.Entities;
using API.Entities.Enums;
using API.Extensions;
using Kavita.Common;
using Microsoft.Extensions.Logging;
using NetVips;

namespace API.Services;

public interface ICacheService
{
    /// <summary>
    /// Ensures the cache is created for the given chapter and if not, will create it. Should be called before any other
    /// cache operations (except cleanup).
    /// </summary>
    /// <param name="chapterId"></param>
    /// <param name="extractPdfToImages">Extracts a PDF into images for a different reading experience</param>
    /// <returns>Chapter for the passed chapterId. Side-effect from ensuring cache.</returns>
    Task<Chapter> Ensure(int chapterId, bool extractPdfToImages = false);

    /// <summary>
    /// Clears cache directory of all volumes. This can be invoked from deleting a library or a series.
    /// </summary>
    /// <param name="chapterIds">Volumes that belong to that library. Assume the library might have been deleted before this invocation.</param>
    void CleanupChapters(IEnumerable<int> chapterIds);
    void CleanupBookmarks(IEnumerable<int> seriesIds);
    string GetCachedPagePath(int chapterId, int page);
    IEnumerable<FileDimensionDto> GetCachedFileDimensions(int chapterId);
    string GetCachedBookmarkPagePath(int seriesId, int page);
    string GetCachedFile(Chapter chapter);
    public void ExtractChapterFiles(string extractPath, IReadOnlyList<MangaFile> files, bool extractPdfImages = false);
    Task<int> CacheBookmarkForSeries(int userId, int seriesId);
    void CleanupBookmarkCache(int seriesId);
}

public class CacheService : ICacheService
{
    private readonly ILogger<CacheService> _logger;
    private readonly IUnitOfWork _unitOfWork;
    private readonly IDirectoryService _directoryService;
    private readonly IReadingItemService _readingItemService;
    private readonly IBookmarkService _bookmarkService;

    public CacheService(ILogger<CacheService> logger, IUnitOfWork unitOfWork,
        IDirectoryService directoryService, IReadingItemService readingItemService,
        IBookmarkService bookmarkService)
    {
        _logger = logger;
        _unitOfWork = unitOfWork;
        _directoryService = directoryService;
        _readingItemService = readingItemService;
        _bookmarkService = bookmarkService;
    }

    public IEnumerable<FileDimensionDto> GetCachedFileDimensions(int chapterId)
    {
        var sw = Stopwatch.StartNew();
        var path = GetCachePath(chapterId);
        var files = _directoryService.GetFilesWithExtension(path, Tasks.Scanner.Parser.Parser.ImageFileExtensions)
            .OrderByNatural(Path.GetFileNameWithoutExtension)
            .ToArray();

        if (files.Length == 0)
        {
            return ArraySegment<FileDimensionDto>.Empty;
        }

        var dimensions = new List<FileDimensionDto>();
        var originalCacheSize = Cache.MaxFiles;
        try
        {
            // Disable NetVips' operation cache so image files aren't held open (and locked) after being read
            Cache.MaxFiles = 0;
            for (var i = 0; i < files.Length; i++)
            {
                var file = files[i];
                using var image = Image.NewFromFile(file, memory: false, access: Enums.Access.SequentialUnbuffered);
                dimensions.Add(new FileDimensionDto()
                {
                    PageNumber = i,
                    Height = image.Height,
                    Width = image.Width,
                    IsWide = image.Width > image.Height,
                    FileName = file.Replace(path, string.Empty)
                });
            }
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "There was an error calculating image dimensions for {ChapterId}", chapterId);
        }
        finally
        {
            Cache.MaxFiles = originalCacheSize;
        }

        _logger.LogDebug("File Dimensions call for {Length} images took {Time}ms", dimensions.Count, sw.ElapsedMilliseconds);
        return dimensions;
    }

    public string GetCachedBookmarkPagePath(int seriesId, int page)
    {
        // Find the page within the series' bookmark cache folder
        var path = GetBookmarkCachePath(seriesId);
        var files = _directoryService.GetFilesWithExtension(path, Tasks.Scanner.Parser.Parser.ImageFileExtensions);
        files = files
            .AsEnumerable()
            .OrderByNatural(Path.GetFileNameWithoutExtension)
            .ToArray();

        if (files.Length == 0)
        {
            return string.Empty;
        }

        // Since the array is 0-based, we need to take that into account (only affects the last image)
        return page == files.Length ? files.ElementAt(page - 1) : files.ElementAt(page);
    }

    /// <summary>
    /// Returns the full path to the cached file. If the file does not exist, will fall back to the original.
    /// </summary>
    /// <param name="chapter"></param>
    /// <returns></returns>
    public string GetCachedFile(Chapter chapter)
    {
        var extractPath = GetCachePath(chapter.Id);
        var path = Path.Join(extractPath, _directoryService.FileSystem.Path.GetFileName(chapter.Files.First().FilePath));
        if (!(_directoryService.FileSystem.FileInfo.FromFileName(path).Exists))
        {
            path = chapter.Files.First().FilePath;
        }
        return path;
    }

    public async Task<Chapter> Ensure(int chapterId, bool extractPdfToImages = false)
    {
        _directoryService.ExistOrCreate(_directoryService.CacheDirectory);
        var chapter = await _unitOfWork.ChapterRepository.GetChapterAsync(chapterId);
        var extractPath = GetCachePath(chapterId);
        if (_directoryService.Exists(extractPath)) return chapter;

        var files = chapter?.Files.ToList();
        ExtractChapterFiles(extractPath, files, extractPdfToImages);
        return chapter;
    }

    /// <summary>
    /// This is an internal method for the cache service for extracting chapter files to disk. The code is structured
    /// for the cache service, but can be re-used (e.g. downloading bookmarks).
    /// </summary>
    /// <param name="extractPath"></param>
    /// <param name="files"></param>
    /// <param name="extractPdfImages">Defaults to false; if true, will extract the images via the PDF renderer and not move the pdf file</param>
    /// <returns></returns>
    public void ExtractChapterFiles(string extractPath, IReadOnlyList<MangaFile> files, bool extractPdfImages = false)
    {
        var removeNonImages = true;
        var fileCount = files.Count;
        var extraPath = "";
        var extractDi = _directoryService.FileSystem.DirectoryInfo.FromDirectoryName(extractPath);

        if (files.Count > 0 && files[0].Format == MangaFormat.Image)
        {
            _readingItemService.Extract(files[0].FilePath, extractPath, MangaFormat.Image, files.Count);
            _directoryService.Flatten(extractDi.FullName);
        }

        foreach (var file in files)
        {
            if (fileCount > 1)
            {
                extraPath = file.Id + string.Empty;
            }

            switch (file.Format)
            {
                case MangaFormat.Archive:
                    _readingItemService.Extract(file.FilePath, Path.Join(extractPath, extraPath), file.Format);
                    break;
                case MangaFormat.Epub:
                case MangaFormat.Pdf:
                {
                    if (!_directoryService.FileSystem.File.Exists(files[0].FilePath))
                    {
                        _logger.LogError("{File} does not exist on disk", files[0].FilePath);
                        throw new KavitaException($"{files[0].FilePath} does not exist on disk");
                    }

                    if (extractPdfImages)
                    {
                        _readingItemService.Extract(file.FilePath, Path.Join(extractPath, extraPath), file.Format);
                        break;
                    }

                    removeNonImages = false;
                    _directoryService.ExistOrCreate(extractPath);
                    _directoryService.CopyFileToDirectory(files[0].FilePath, extractPath);
                    break;
                }
            }
        }

        _directoryService.Flatten(extractDi.FullName);
        if (removeNonImages)
        {
            _directoryService.RemoveNonImages(extractDi.FullName);
        }
    }

    /// <summary>
    /// Removes the cached files and folders for a set of chapterIds
    /// </summary>
    /// <param name="chapterIds"></param>
    public void CleanupChapters(IEnumerable<int> chapterIds)
    {
        foreach (var chapter in chapterIds)
        {
            _directoryService.ClearAndDeleteDirectory(GetCachePath(chapter));
        }
    }

    /// <summary>
    /// Removes the cached bookmark files and folders for a set of seriesIds
    /// </summary>
    /// <param name="seriesIds"></param>
    public void CleanupBookmarks(IEnumerable<int> seriesIds)
    {
        foreach (var series in seriesIds)
        {
            _directoryService.ClearAndDeleteDirectory(GetBookmarkCachePath(series));
        }
    }

    /// <summary>
    /// Returns the cache path for a given Chapter. Should be cacheDirectory/{chapterId}/
    /// </summary>
    /// <param name="chapterId"></param>
    /// <returns></returns>
    private string GetCachePath(int chapterId)
    {
        return _directoryService.FileSystem.Path.GetFullPath(_directoryService.FileSystem.Path.Join(_directoryService.CacheDirectory, $"{chapterId}/"));
    }

    private string GetBookmarkCachePath(int seriesId)
    {
        return _directoryService.FileSystem.Path.GetFullPath(_directoryService.FileSystem.Path.Join(_directoryService.CacheDirectory, $"{seriesId}_bookmarks/"));
    }

    /// <summary>
    /// Returns the absolute path of a cached page.
    /// </summary>
    /// <param name="chapterId">Chapter id with Files populated.</param>
    /// <param name="page">Page number to look for</param>
    /// <returns>Page filepath or empty if no files found.</returns>
    public string GetCachedPagePath(int chapterId, int page)
    {
        // Calculate what chapter the page belongs to
        var path = GetCachePath(chapterId);
        // NOTE: We can optimize this by extracting and renaming, so we don't need to scan for the files and can do a direct access
        var files = _directoryService.GetFilesWithExtension(path, Tasks.Scanner.Parser.Parser.ImageFileExtensions)
            .OrderByNatural(Path.GetFileNameWithoutExtension)
            .ToArray();

        if (files.Length == 0)
        {
            return string.Empty;
        }

        if (page > files.Length) page = files.Length;

        // Since the array is 0-based, we need to take that into account (only affects the last image)
        return page == files.Length ? files.ElementAt(page - 1) : files.ElementAt(page);
    }

    public async Task<int> CacheBookmarkForSeries(int userId, int seriesId)
    {
        var destDirectory = _directoryService.FileSystem.Path.Join(_directoryService.CacheDirectory, seriesId + "_bookmarks");
        if (_directoryService.Exists(destDirectory)) return _directoryService.GetFiles(destDirectory).Count();

        var bookmarkDtos = await _unitOfWork.UserRepository.GetBookmarkDtosForSeries(userId, seriesId);
        var files = (await _bookmarkService.GetBookmarkFilesById(bookmarkDtos.Select(b => b.Id))).ToList();
        _directoryService.CopyFilesToDirectory(files, destDirectory,
            Enumerable.Range(1, files.Count).Select(i => i + string.Empty).ToList());
        return files.Count;
    }

    /// <summary>
    /// Clears the cached bookmarks folder for a given seriesId
    /// </summary>
    /// <param name="seriesId"></param>
    public void CleanupBookmarkCache(int seriesId)
    {
        var destDirectory = _directoryService.FileSystem.Path.Join(_directoryService.CacheDirectory, seriesId + "_bookmarks");
        if (!_directoryService.Exists(destDirectory)) return;

        _directoryService.ClearAndDeleteDirectory(destDirectory);
    }
}
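
For orientation, a minimal sketch of how a consumer (for example, a reader endpoint) typically drives this service; the wrapper class is hypothetical and only the ICacheService calls mirror the interface above:

```csharp
using System.Threading.Tasks;
using API.Services;

// Hypothetical consumer of ICacheService, shown for illustration only.
public class ReaderUsageSketch
{
    private readonly ICacheService _cacheService;

    public ReaderUsageSketch(ICacheService cacheService)
    {
        _cacheService = cacheService;
    }

    public async Task<string> GetPageFile(int chapterId, int page)
    {
        // 1. Extract the chapter's archive/epub/pdf/images into cache/{chapterId}/ (no-op if already extracted)
        var chapter = await _cacheService.Ensure(chapterId);
        if (chapter == null) return string.Empty;

        // 2. Resolve the naturally ordered image file for the requested page
        return _cacheService.GetCachedPagePath(chapterId, page);
    }
}
```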