mirror of
https://github.com/immich-app/immich.git
synced 2026-04-15 15:11:54 -04:00
* feat: add OCR functionality and related configurations * chore: update labeler configuration for machine learning files * feat(i18n): enhance OCR model descriptions and add orientation classification and unwarping features * chore: update Dockerfile to include ccache for improved build performance * feat(ocr): enhance OCR model configuration with orientation classification and unwarping options, update PaddleOCR integration, and improve response structure * refactor(ocr): remove OCR_CLEANUP job from enum and type definitions * refactor(ocr): remove obsolete OCR entity and migration files, and update asset job status and schema to accommodate new OCR table structure * refactor(ocr): update OCR schema and response structure to use individual coordinates instead of bounding box, and adjust related service and repository files * feat: enhance OCR configuration and functionality - Updated OCR settings to include minimum detection box score, minimum detection score, and minimum recognition score. - Refactored PaddleOCRecognizer to utilize new scoring parameters. - Introduced new database tables for asset OCR data and search functionality. - Modified related services and repositories to support the new OCR features. - Updated translations for improved clarity in settings UI. * sql changes * use rapidocr * change dto * update web * update lock * update api * store positions as normalized floats * match column order in db * update admin ui settings descriptions fix max resolution key set min threshold to 0.1 fix bind * apply config correctly, adjust defaults * unnecessary model type * unnecessary sources * fix(ocr): switch RapidOCR lang type from LangDet to LangRec * fix(ocr): expose lang_type (LangRec.CH) and font_path on OcrOptions for RapidOCR * fix(ocr): make OCR text search case- and accent-insensitive using ILIKE + unaccent * fix(ocr): add OCR search fields * fix: Add OCR database migration and update ML prediction logic. 
* trigrams are already case insensitive * add tests * format * update migrations * wrong uuid function * linting * maybe fix medium tests * formatting * fix weblate check * openapi * sql * minor fixes * maybe fix medium tests part 2 * passing medium tests * format web * readd sql * format dart * disabled in e2e * chore: translation ordering --------- Co-authored-by: mertalev <101130780+mertalev@users.noreply.github.com> Co-authored-by: Alex Tran <alex.tran1502@gmail.com>
248 lines
7.5 KiB
TypeScript
import { Injectable } from '@nestjs/common';
|
|
import { Duration } from 'luxon';
|
|
import { readFile } from 'node:fs/promises';
|
|
import { MachineLearningConfig } from 'src/config';
|
|
import { CLIPConfig } from 'src/dtos/model-config.dto';
|
|
import { LoggingRepository } from 'src/repositories/logging.repository';
|
|
|
|
/** Rectangular image region; (x1, y1) and (x2, y2) are opposite corners. */
export interface BoundingBox {
  x1: number;
  y1: number;
  x2: number;
  y2: number;
}
|
|
|
|
/** High-level machine learning tasks supported by the ML service. */
export enum ModelTask {
  FACIAL_RECOGNITION = 'facial-recognition',
  // smart search is implemented with CLIP embeddings, hence the wire value 'clip'
  SEARCH = 'clip',
  OCR = 'ocr',
}
|
|
|
|
/** Model stage/variant within a task (e.g. detection vs. recognition in a pipeline). */
export enum ModelType {
  DETECTION = 'detection',
  PIPELINE = 'pipeline',
  RECOGNITION = 'recognition',
  TEXTUAL = 'textual',
  VISUAL = 'visual',
  OCR = 'ocr',
}
|
|
|
|
/** Input to a prediction: either an image on disk or a text query. */
export type ModelPayload = { imagePath: string } | { text: string };

type ModelOptions = { modelName: string };

/** Face detection options; detections below minScore are discarded by the service. */
export type FaceDetectionOptions = ModelOptions & { minScore: number };

/** OCR model options, covering both the detection and recognition stages. */
export type OcrOptions = ModelOptions & {
  // minimum confidence for a detected text box to be kept
  minDetectionScore: number;
  // minimum confidence for recognized text to be kept
  minRecognitionScore: number;
  // NOTE(review): presumably caps the image size used for detection — confirm against the ML service
  maxResolution: number;
};
|
|
/** Dimensions of the processed image, merged into every visual response. */
type VisualResponse = { imageHeight: number; imageWidth: number };
/** Request shape for a CLIP image-embedding prediction. */
export type ClipVisualRequest = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: ModelOptions } };
// the embedding arrives as a string — presumably serialized; verify against the ML service
export type ClipVisualResponse = { [ModelTask.SEARCH]: string } & VisualResponse;

/** Request shape for a CLIP text-embedding prediction. */
export type ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: ModelOptions } };
export type ClipTextualResponse = { [ModelTask.SEARCH]: string };
|
|
|
|
/**
 * OCR results as parallel arrays: entry i of each array describes the i-th
 * detected text region. NOTE(review): `box` appears to hold flattened region
 * coordinates — confirm layout and normalization against the ML service.
 */
export type OCR = {
  text: string[];
  box: number[];
  boxScore: number[];
  textScore: number[];
};

/** Request shape for OCR: detection and recognition stages, each with its own thresholds. */
export type OcrRequest = {
  [ModelTask.OCR]: {
    [ModelType.DETECTION]: ModelOptions & { options: { minScore: number; maxResolution: number } };
    [ModelType.RECOGNITION]: ModelOptions & { options: { minScore: number } };
  };
};
export type OcrResponse = { [ModelTask.OCR]: OCR } & VisualResponse;
|
|
|
|
/** Request shape for facial recognition: detection followed by embedding extraction. */
export type FacialRecognitionRequest = {
  [ModelTask.FACIAL_RECOGNITION]: {
    [ModelType.DETECTION]: ModelOptions & { options: { minScore: number } };
    [ModelType.RECOGNITION]: ModelOptions;
  };
};

/** A single detected face. */
export interface Face {
  boundingBox: BoundingBox;
  // embedding arrives as a string — presumably serialized; verify against consumers
  embedding: string;
  // detection confidence
  score: number;
}

export type FacialRecognitionResponse = { [ModelTask.FACIAL_RECOGNITION]: Face[] } & VisualResponse;
export type DetectedFaces = { faces: Face[] } & VisualResponse;
/** Union of every request payload accepted by the ML service's /predict endpoint. */
export type MachineLearningRequest = ClipVisualRequest | ClipTextualRequest | FacialRecognitionRequest | OcrRequest;
/** Text-encoding options; `language` optionally hints the query language to the model. */
export type TextEncodingOptions = ModelOptions & { language?: string };
|
|
|
|
@Injectable()
|
|
export class MachineLearningRepository {
|
|
private healthyMap: Record<string, boolean> = {};
|
|
private interval?: ReturnType<typeof setInterval>;
|
|
private _config?: MachineLearningConfig;
|
|
|
|
private get config(): MachineLearningConfig {
|
|
if (!this._config) {
|
|
throw new Error('Machine learning repository not been setup');
|
|
}
|
|
|
|
return this._config;
|
|
}
|
|
|
|
constructor(private logger: LoggingRepository) {
|
|
this.logger.setContext(MachineLearningRepository.name);
|
|
}
|
|
|
|
setup(config: MachineLearningConfig) {
|
|
this._config = config;
|
|
this.teardown();
|
|
|
|
// delete old servers
|
|
for (const url of Object.keys(this.healthyMap)) {
|
|
if (!config.urls.includes(url)) {
|
|
delete this.healthyMap[url];
|
|
}
|
|
}
|
|
|
|
if (!config.enabled || !config.availabilityChecks.enabled) {
|
|
return;
|
|
}
|
|
|
|
this.tick();
|
|
this.interval = setInterval(
|
|
() => this.tick(),
|
|
Duration.fromObject({ milliseconds: config.availabilityChecks.interval }).as('milliseconds'),
|
|
);
|
|
}
|
|
|
|
teardown() {
|
|
if (this.interval) {
|
|
clearInterval(this.interval);
|
|
}
|
|
}
|
|
|
|
private tick() {
|
|
for (const url of this.config.urls) {
|
|
void this.check(url);
|
|
}
|
|
}
|
|
|
|
private async check(url: string) {
|
|
let healthy = false;
|
|
try {
|
|
const response = await fetch(new URL('/ping', url), {
|
|
signal: AbortSignal.timeout(this.config.availabilityChecks.timeout),
|
|
});
|
|
if (response.ok) {
|
|
healthy = true;
|
|
}
|
|
} catch {
|
|
// nothing to do here
|
|
}
|
|
|
|
this.setHealthy(url, healthy);
|
|
}
|
|
|
|
private setHealthy(url: string, healthy: boolean) {
|
|
if (this.healthyMap[url] !== healthy) {
|
|
this.logger.log(`Machine learning server became ${healthy ? 'healthy' : 'unhealthy'} (${url}).`);
|
|
}
|
|
|
|
this.healthyMap[url] = healthy;
|
|
}
|
|
|
|
private isHealthy(url: string) {
|
|
if (!this.config.availabilityChecks.enabled) {
|
|
return true;
|
|
}
|
|
|
|
return this.healthyMap[url];
|
|
}
|
|
|
|
private async predict<T>(payload: ModelPayload, config: MachineLearningRequest): Promise<T> {
|
|
const formData = await this.getFormData(payload, config);
|
|
|
|
for (const url of [
|
|
// try healthy servers first
|
|
...this.config.urls.filter((url) => this.isHealthy(url)),
|
|
...this.config.urls.filter((url) => !this.isHealthy(url)),
|
|
]) {
|
|
try {
|
|
const response = await fetch(new URL('/predict', url), { method: 'POST', body: formData });
|
|
if (response.ok) {
|
|
this.setHealthy(url, true);
|
|
return response.json();
|
|
}
|
|
|
|
this.logger.warn(
|
|
`Machine learning request to "${url}" failed with status ${response.status}: ${response.statusText}`,
|
|
);
|
|
} catch (error: Error | unknown) {
|
|
this.logger.warn(
|
|
`Machine learning request to "${url}" failed: ${error instanceof Error ? error.message : error}`,
|
|
);
|
|
}
|
|
|
|
this.setHealthy(url, false);
|
|
}
|
|
|
|
throw new Error(`Machine learning request '${JSON.stringify(config)}' failed for all URLs`);
|
|
}
|
|
|
|
async detectFaces(imagePath: string, { modelName, minScore }: FaceDetectionOptions) {
|
|
const request = {
|
|
[ModelTask.FACIAL_RECOGNITION]: {
|
|
[ModelType.DETECTION]: { modelName, options: { minScore } },
|
|
[ModelType.RECOGNITION]: { modelName },
|
|
},
|
|
};
|
|
const response = await this.predict<FacialRecognitionResponse>({ imagePath }, request);
|
|
return {
|
|
imageHeight: response.imageHeight,
|
|
imageWidth: response.imageWidth,
|
|
faces: response[ModelTask.FACIAL_RECOGNITION],
|
|
};
|
|
}
|
|
|
|
async encodeImage(imagePath: string, { modelName }: CLIPConfig) {
|
|
const request = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: { modelName } } };
|
|
const response = await this.predict<ClipVisualResponse>({ imagePath }, request);
|
|
return response[ModelTask.SEARCH];
|
|
}
|
|
|
|
async encodeText(text: string, { language, modelName }: TextEncodingOptions) {
|
|
const request = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: { modelName, options: { language } } } };
|
|
const response = await this.predict<ClipTextualResponse>({ text }, request);
|
|
return response[ModelTask.SEARCH];
|
|
}
|
|
|
|
async ocr(imagePath: string, { modelName, minDetectionScore, minRecognitionScore, maxResolution }: OcrOptions) {
|
|
const request = {
|
|
[ModelTask.OCR]: {
|
|
[ModelType.DETECTION]: { modelName, options: { minScore: minDetectionScore, maxResolution } },
|
|
[ModelType.RECOGNITION]: { modelName, options: { minScore: minRecognitionScore } },
|
|
},
|
|
};
|
|
const response = await this.predict<OcrResponse>({ imagePath }, request);
|
|
return response[ModelTask.OCR];
|
|
}
|
|
|
|
private async getFormData(payload: ModelPayload, config: MachineLearningRequest): Promise<FormData> {
|
|
const formData = new FormData();
|
|
formData.append('entries', JSON.stringify(config));
|
|
|
|
if ('imagePath' in payload) {
|
|
const fileBuffer = await readFile(payload.imagePath);
|
|
formData.append('image', new Blob([new Uint8Array(fileBuffer)]));
|
|
} else if ('text' in payload) {
|
|
formData.append('text', payload.text);
|
|
} else {
|
|
throw new Error('Invalid input');
|
|
}
|
|
|
|
return formData;
|
|
}
|
|
}
|