// immich/server/src/interfaces/machine-learning.interface.ts

export const IMachineLearningRepository = 'IMachineLearningRepository';

export interface BoundingBox {
  x1: number;
  y1: number;
  x2: number;
  y2: number;
}
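
// Illustrative helper, not from the source: assuming (x1, y1) is the top-left
// and (x2, y2) the bottom-right corner in pixel coordinates, the covered area is:
const boundingBoxArea = ({ x1, y1, x2, y2 }: BoundingBox): number => (x2 - x1) * (y2 - y1);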

// ModelTask identifies the high-level job; smart search is backed by CLIP,
// hence the 'clip' wire value.
export enum ModelTask {
  FACIAL_RECOGNITION = 'facial-recognition',
  SEARCH = 'clip',
}

export enum ModelType {
  DETECTION = 'detection',
  PIPELINE = 'pipeline',
  RECOGNITION = 'recognition',
  TEXTUAL = 'textual',
  VISUAL = 'visual',
}

export type ModelPayload = { imagePath: string } | { text: string };
type ModelOptions = { modelName: string };
export type FaceDetectionOptions = ModelOptions & { minScore: number };
type VisualResponse = { imageHeight: number; imageWidth: number };
export type ClipVisualRequest = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: ModelOptions } };
export type ClipVisualResponse = { [ModelTask.SEARCH]: string } & VisualResponse;
export type ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: ModelOptions } };
export type ClipTextualResponse = { [ModelTask.SEARCH]: string };

export type FacialRecognitionRequest = {
  [ModelTask.FACIAL_RECOGNITION]: {
    [ModelType.DETECTION]: ModelOptions & { options: { minScore: number } };
    [ModelType.RECOGNITION]: ModelOptions;
  };
};

export interface Face {
  boundingBox: BoundingBox;
  embedding: string;
  score: number;
}
export type FacialRecognitionResponse = { [ModelTask.FACIAL_RECOGNITION]: Face[] } & VisualResponse;
export type DetectedFaces = { faces: Face[] } & VisualResponse;
export type MachineLearningRequest = ClipVisualRequest | ClipTextualRequest | FacialRecognitionRequest;
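
// Illustrative only: one member of the request union, as the types above
// permit it. The model name is a hypothetical placeholder; this file does not
// pin any particular model.
const exampleRequest: MachineLearningRequest = {
  [ModelTask.FACIAL_RECOGNITION]: {
    [ModelType.DETECTION]: { modelName: 'example-face-model', options: { minScore: 0.7 } },
    [ModelType.RECOGNITION]: { modelName: 'example-face-model' },
  },
};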

export interface IMachineLearningRepository {
  encodeImage(urls: string[], imagePath: string, config: ModelOptions): Promise<string>;
  encodeText(urls: string[], text: string, config: ModelOptions): Promise<string>;
  detectFaces(urls: string[], imagePath: string, config: FaceDetectionOptions): Promise<DetectedFaces>;
}
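
// A minimal sketch of an implementation, assuming the ML server exposes a
// multipart /predict endpoint with 'entries', 'image', and 'text' fields, and
// that `urls` lists candidate servers to try in order. The endpoint shape and
// field names are assumptions for illustration; only the interface above is
// taken from the source.
import { readFile } from 'node:fs/promises';

class MachineLearningRepositorySketch implements IMachineLearningRepository {
  async encodeImage(urls: string[], imagePath: string, config: ModelOptions): Promise<string> {
    const request: ClipVisualRequest = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: config } };
    const response = (await this.predict(urls, { imagePath }, request)) as ClipVisualResponse;
    return response[ModelTask.SEARCH];
  }

  async encodeText(urls: string[], text: string, config: ModelOptions): Promise<string> {
    const request: ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: config } };
    const response = (await this.predict(urls, { text }, request)) as ClipTextualResponse;
    return response[ModelTask.SEARCH];
  }

  async detectFaces(urls: string[], imagePath: string, { minScore, ...config }: FaceDetectionOptions): Promise<DetectedFaces> {
    const request: FacialRecognitionRequest = {
      [ModelTask.FACIAL_RECOGNITION]: {
        [ModelType.DETECTION]: { ...config, options: { minScore } },
        [ModelType.RECOGNITION]: config,
      },
    };
    const response = (await this.predict(urls, { imagePath }, request)) as FacialRecognitionResponse;
    const { imageHeight, imageWidth } = response;
    return { faces: response[ModelTask.FACIAL_RECOGNITION], imageHeight, imageWidth };
  }

  // Hypothetical helper: posts the payload to each server URL in turn and
  // returns the first successful JSON response.
  private async predict(urls: string[], payload: ModelPayload, entries: MachineLearningRequest): Promise<unknown> {
    const form = new FormData();
    form.append('entries', JSON.stringify(entries));
    if ('imagePath' in payload) {
      form.append('image', new Blob([await readFile(payload.imagePath)]));
    } else {
      form.append('text', payload.text);
    }
    for (const url of urls) {
      try {
        const response = await fetch(new URL('/predict', url), { method: 'POST', body: form });
        if (response.ok) {
          return await response.json();
        }
      } catch {
        // unreachable server; try the next one
      }
    }
    throw new Error('No machine learning server is available');
  }
}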