Change chunk size to be a fixed 50 to validate whether it's causing an issue with refresh. Added some try/catches to see if exceptions are causing issues. (#681)

This commit is contained in:
Joseph Milazzo 2021-10-16 11:50:34 -07:00 committed by GitHub
parent f618c3443b
commit 1d80420155
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 29 additions and 20 deletions

View File

@@ -455,15 +455,15 @@ namespace API.Data.Repositories
// TODO: Think about making this bigger depending on number of files a user has in said library // TODO: Think about making this bigger depending on number of files a user has in said library
// and number of cores and amount of memory. We can then make an optimal choice // and number of cores and amount of memory. We can then make an optimal choice
var totalSeries = await GetSeriesCount(libraryId); var totalSeries = await GetSeriesCount(libraryId);
var procCount = Math.Max(Environment.ProcessorCount - 1, 1); // var procCount = Math.Max(Environment.ProcessorCount - 1, 1);
//
if (totalSeries < procCount * 2 || totalSeries < 50) // if (totalSeries < procCount * 2 || totalSeries < 50)
{ // {
return new Tuple<int, int>(totalSeries, totalSeries); // return new Tuple<int, int>(totalSeries, totalSeries);
} // }
//
// return new Tuple<int, int>(totalSeries, Math.Max(totalSeries / procCount, 50));
return new Tuple<int, int>(totalSeries, Math.Max(totalSeries / procCount, 50)); return new Tuple<int, int>(totalSeries, 50);
} }
public async Task<Chunk> GetChunkInfo(int libraryId = 0) public async Task<Chunk> GetChunkInfo(int libraryId = 0)

View File

@@ -1,3 +1,4 @@
using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Diagnostics; using System.Diagnostics;
using System.IO; using System.IO;
@@ -217,9 +218,10 @@ namespace API.Services
var stopwatch = Stopwatch.StartNew(); var stopwatch = Stopwatch.StartNew();
var totalTime = 0L; var totalTime = 0L;
_logger.LogInformation("[MetadataService] Refreshing Library {LibraryName}. Total Items: {TotalSize}. Total Chunks: {TotalChunks} with {ChunkSize} size", library.Name, chunkInfo.TotalSize, chunkInfo.TotalChunks, chunkInfo.ChunkSize); _logger.LogInformation("[MetadataService] Refreshing Library {LibraryName}. Total Items: {TotalSize}. Total Chunks: {TotalChunks} with {ChunkSize} size", library.Name, chunkInfo.TotalSize, chunkInfo.TotalChunks, chunkInfo.ChunkSize);
// This technically does
for (var chunk = 1; chunk <= chunkInfo.TotalChunks; chunk++) for (var chunk = 1; chunk <= chunkInfo.TotalChunks; chunk++)
{ {
if (chunkInfo.TotalChunks == 0) continue;
totalTime += stopwatch.ElapsedMilliseconds; totalTime += stopwatch.ElapsedMilliseconds;
stopwatch.Restart(); stopwatch.Restart();
_logger.LogInformation("[MetadataService] Processing chunk {ChunkNumber} / {TotalChunks} with size {ChunkSize}. Series ({SeriesStart} - {SeriesEnd}", _logger.LogInformation("[MetadataService] Processing chunk {ChunkNumber} / {TotalChunks} with size {ChunkSize}. Series ({SeriesStart} - {SeriesEnd}",
@@ -230,8 +232,10 @@ namespace API.Services
PageNumber = chunk, PageNumber = chunk,
PageSize = chunkInfo.ChunkSize PageSize = chunkInfo.ChunkSize
}); });
_logger.LogDebug($"[MetadataService] Fetched {nonLibrarySeries.Count} series for refresh"); _logger.LogDebug("[MetadataService] Fetched {SeriesCount} series for refresh", nonLibrarySeries.Count);
Parallel.ForEach(nonLibrarySeries, series => Parallel.ForEach(nonLibrarySeries, series =>
{
try
{ {
_logger.LogDebug("[MetadataService] Processing series {SeriesName}", series.OriginalName); _logger.LogDebug("[MetadataService] Processing series {SeriesName}", series.OriginalName);
var volumeUpdated = false; var volumeUpdated = false;
@@ -247,6 +251,11 @@ namespace API.Services
} }
UpdateMetadata(series, volumeUpdated || forceUpdate); UpdateMetadata(series, volumeUpdated || forceUpdate);
}
catch (Exception)
{
/* Swallow exception */
}
}); });
if (_unitOfWork.HasChanges() && await _unitOfWork.CommitAsync()) if (_unitOfWork.HasChanges() && await _unitOfWork.CommitAsync())