export const IMachineLearningRepository = 'IMachineLearningRepository';

export interface BoundingBox {
  x1: number;
  y1: number;
  x2: number;
  y2: number;
}

export enum ModelTask {
  FACIAL_RECOGNITION = 'facial-recognition',
  SEARCH = 'clip',
}

export enum ModelType {
  DETECTION = 'detection',
  PIPELINE = 'pipeline',
  RECOGNITION = 'recognition',
  TEXTUAL = 'textual',
  VISUAL = 'visual',
}

export type ModelPayload = { imagePath: string } | { text: string };

type ModelOptions = { modelName: string };

export type FaceDetectionOptions = ModelOptions & { minScore: number };

type VisualResponse = { imageHeight: number; imageWidth: number };

// CLIP requests/responses: visual and textual encoders both return an embedding vector.
export type ClipVisualRequest = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: ModelOptions } };
export type ClipVisualResponse = { [ModelTask.SEARCH]: number[] } & VisualResponse;

export type ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: ModelOptions } };
export type ClipTextualResponse = { [ModelTask.SEARCH]: number[] };

// Facial recognition runs a detection model followed by a recognition model.
export type FacialRecognitionRequest = {
  [ModelTask.FACIAL_RECOGNITION]: {
    [ModelType.DETECTION]: FaceDetectionOptions;
    [ModelType.RECOGNITION]: ModelOptions;
  };
};

export interface Face {
  boundingBox: BoundingBox;
  embedding: number[];
  score: number;
}

export type FacialRecognitionResponse = { [ModelTask.FACIAL_RECOGNITION]: Face[] } & VisualResponse;
export type DetectedFaces = { faces: Face[] } & VisualResponse;
export type MachineLearningRequest = ClipVisualRequest | ClipTextualRequest | FacialRecognitionRequest;

export interface IMachineLearningRepository {
  encodeImage(url: string, imagePath: string, config: ModelOptions): Promise<number[]>;
  encodeText(url: string, text: string, config: ModelOptions): Promise<number[]>;
  detectFaces(url: string, imagePath: string, config: FaceDetectionOptions): Promise<DetectedFaces>;
}
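
// Usage sketch (illustrative only, not part of the repository contract): how the
// request/response types above compose for a facial recognition call. The helper
// names `buildFacialRecognitionRequest` and `toDetectedFaces` are hypothetical
// examples introduced here, not identifiers from the codebase.
const buildFacialRecognitionRequest = (
  detection: FaceDetectionOptions,
  recognition: ModelOptions,
): FacialRecognitionRequest => ({
  [ModelTask.FACIAL_RECOGNITION]: {
    [ModelType.DETECTION]: detection,
    [ModelType.RECOGNITION]: recognition,
  },
});

// A facial recognition response pairs the detected faces with the source image size,
// which is what the DetectedFaces shape carries.
const toDetectedFaces = (response: FacialRecognitionResponse): DetectedFaces => ({
  faces: response[ModelTask.FACIAL_RECOGNITION],
  imageHeight: response.imageHeight,
  imageWidth: response.imageWidth,
});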