Mirror of https://github.com/Kareadita/Kavita.git (synced 2025-07-09 03:04:19 -04:00)
Change chunk size to be a fixed 50 to validate whether it is causing issues with refresh. Added some try/catches to see if exceptions are causing issues. (#681)
commit 1d80420155
parent f618c3443b
@@ -455,15 +455,15 @@ namespace API.Data.Repositories
             // TODO: Think about making this bigger depending on number of files a user has in said library
             // and number of cores and amount of memory. We can then make an optimal choice
             var totalSeries = await GetSeriesCount(libraryId);
-            var procCount = Math.Max(Environment.ProcessorCount - 1, 1);
-
-            if (totalSeries < procCount * 2 || totalSeries < 50)
-            {
-                return new Tuple<int, int>(totalSeries, totalSeries);
-            }
-
-            return new Tuple<int, int>(totalSeries, Math.Max(totalSeries / procCount, 50));
+            // var procCount = Math.Max(Environment.ProcessorCount - 1, 1);
+            //
+            // if (totalSeries < procCount * 2 || totalSeries < 50)
+            // {
+            //     return new Tuple<int, int>(totalSeries, totalSeries);
+            // }
+            //
+            // return new Tuple<int, int>(totalSeries, Math.Max(totalSeries / procCount, 50));
+            return new Tuple<int, int>(totalSeries, 50);
         }
 
         public async Task<Chunk> GetChunkInfo(int libraryId = 0)
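The removed heuristic sized chunks as Math.Max(totalSeries / procCount, 50), with procCount derived from Environment.ProcessorCount, and collapsed to a single chunk for small libraries; the commit pins the size at a flat 50 to take processor count out of the equation while the refresh bug is isolated. Below is a minimal sketch of the arithmetic that falls out of the fixed size; the Chunk shape is inferred from the MetadataService log template further down, and the helper itself is hypothetical, not the repository's code:

    using System;

    // Assumed shape, inferred from the MetadataService log template
    // ("Total Items: {TotalSize}. Total Chunks: {TotalChunks} with {ChunkSize} size").
    public sealed class Chunk
    {
        public int TotalSize { get; init; }
        public int ChunkSize { get; init; }
        public int TotalChunks { get; init; }
    }

    public static class ChunkMath
    {
        // Hypothetical helper: with the commit's fixed chunk size of 50,
        // the chunk count is the integer ceiling of totalSeries / 50.
        public static Chunk FromCounts(int totalSeries, int chunkSize = 50)
        {
            return new Chunk
            {
                TotalSize = totalSeries,
                ChunkSize = chunkSize,
                TotalChunks = (int)Math.Ceiling(totalSeries / (double)chunkSize)
            };
        }
    }

Under this sketch, FromCounts(120) yields 3 chunks of 50, and FromCounts(0) yields 0 chunks, the empty-library case the refresh loop below has to skip.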
@@ -1,3 +1,4 @@
+using System;
 using System.Collections.Generic;
 using System.Diagnostics;
 using System.IO;
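The lone added directive lines up with the catch (Exception) blocks introduced below in this file: Exception lives in the System namespace, so the new using System; is presumably what lets the unqualified name resolve.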
@@ -217,9 +218,10 @@ namespace API.Services
             var stopwatch = Stopwatch.StartNew();
             var totalTime = 0L;
             _logger.LogInformation("[MetadataService] Refreshing Library {LibraryName}. Total Items: {TotalSize}. Total Chunks: {TotalChunks} with {ChunkSize} size", library.Name, chunkInfo.TotalSize, chunkInfo.TotalChunks, chunkInfo.ChunkSize);
-            // This technically does
+
             for (var chunk = 1; chunk <= chunkInfo.TotalChunks; chunk++)
             {
+                if (chunkInfo.TotalChunks == 0) continue;
                 totalTime += stopwatch.ElapsedMilliseconds;
                 stopwatch.Restart();
                 _logger.LogInformation("[MetadataService] Processing chunk {ChunkNumber} / {TotalChunks} with size {ChunkSize}. Series ({SeriesStart} - {SeriesEnd}",
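A small observation on the added guard: the loop condition chunk <= chunkInfo.TotalChunks already fails when TotalChunks is 0, so the body, and with it the new continue, never runs for an empty library. A hypothetical sketch of the same intent expressed before the loop (not the commit's code):

    // Hypothetical restructuring: make the empty-library exit explicit
    // instead of guarding inside a loop that would not execute anyway.
    private static void ProcessChunks(int totalChunks)
    {
        if (totalChunks == 0) return;

        for (var chunk = 1; chunk <= totalChunks; chunk++)
        {
            // ... refresh the metadata for the series in this chunk ...
        }
    }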
@@ -230,23 +232,30 @@ namespace API.Services
                     PageNumber = chunk,
                     PageSize = chunkInfo.ChunkSize
                 });
-                _logger.LogDebug($"[MetadataService] Fetched {nonLibrarySeries.Count} series for refresh");
+                _logger.LogDebug("[MetadataService] Fetched {SeriesCount} series for refresh", nonLibrarySeries.Count);
                 Parallel.ForEach(nonLibrarySeries, series =>
                 {
-                    _logger.LogDebug("[MetadataService] Processing series {SeriesName}", series.OriginalName);
-                    var volumeUpdated = false;
-                    foreach (var volume in series.Volumes)
+                    try
                     {
-                        var chapterUpdated = false;
-                        foreach (var chapter in volume.Chapters)
+                        _logger.LogDebug("[MetadataService] Processing series {SeriesName}", series.OriginalName);
+                        var volumeUpdated = false;
+                        foreach (var volume in series.Volumes)
                         {
-                            chapterUpdated = UpdateMetadata(chapter, forceUpdate);
+                            var chapterUpdated = false;
+                            foreach (var chapter in volume.Chapters)
+                            {
+                                chapterUpdated = UpdateMetadata(chapter, forceUpdate);
+                            }
+
+                            volumeUpdated = UpdateMetadata(volume, chapterUpdated || forceUpdate);
                         }
-
-                        volumeUpdated = UpdateMetadata(volume, chapterUpdated || forceUpdate);
+                        UpdateMetadata(series, volumeUpdated || forceUpdate);
                     }
+                    catch (Exception)
+                    {
+                        /* Swallow exception */
+                    }
-
-                    UpdateMetadata(series, volumeUpdated || forceUpdate);
                 });
 
                 if (_unitOfWork.HasChanges() && await _unitOfWork.CommitAsync())
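Two distinct fixes land in this hunk. First, the interpolated LogDebug($"...") becomes a structured message template, so SeriesCount is captured as a named log property instead of being baked into the string. Second, each series' refresh is wrapped in try/catch: an unhandled exception inside a Parallel.ForEach body surfaces as an AggregateException and aborts the whole loop, so one bad series could previously kill the entire refresh. A minimal, self-contained sketch of that pattern; RefreshAll and ProcessSeries are hypothetical stand-ins, not the repository's code:

    using System;
    using System.Threading.Tasks;

    public static class ParallelRefreshSketch
    {
        public static void RefreshAll(string[] seriesNames)
        {
            Parallel.ForEach(seriesNames, name =>
            {
                try
                {
                    ProcessSeries(name); // stand-in for the real metadata refresh
                }
                catch (Exception)
                {
                    // Swallow, mirroring the commit; logging here would keep
                    // the failure visible instead of merely contained.
                }
            });
        }

        // Hypothetical worker: throws on bad input to show that one failure
        // no longer aborts the whole parallel loop.
        private static void ProcessSeries(string name)
        {
            if (string.IsNullOrEmpty(name)) throw new ArgumentException("empty series name");
            Console.WriteLine($"Refreshed {name}");
        }
    }

Calling ParallelRefreshSketch.RefreshAll(new[] { "A", "", "B" }) refreshes A and B while quietly absorbing the failure on the empty name. Swallowing silently matches the commit's stated goal of checking whether exceptions are the culprit; logging inside the catch would be the natural next step.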