feat(ml): configurable batch size for facial recognition (#13689)

* configurable batch size, default openvino to 1
* update docs
* don't add a new dependency for two lines
* fix typing

commit 1ec9a60e41, parent a76c39812f
Documentation — machine learning environment variables:

@@ -149,7 +149,7 @@ Redis (Sentinel) URL example JSON before encoding:
 ## Machine Learning
 
 | Variable                                                  | Description                                                                                         |             Default             | Containers       |
-| :-------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------------: | :--------------- |
+| :-------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- |
 | `MACHINE_LEARNING_MODEL_TTL`                              | Inactivity time (s) before a model is unloaded (disabled if \<= 0)                                  |              `300`              | machine learning |
 | `MACHINE_LEARNING_MODEL_TTL_POLL_S`                       | Interval (s) between checks for the model TTL (disabled if \<= 0)                                   |              `10`               | machine learning |
 | `MACHINE_LEARNING_CACHE_FOLDER`                           | Directory where models are downloaded                                                               |            `/cache`             | machine learning |
@@ -158,13 +158,14 @@ Redis (Sentinel) URL example JSON before encoding:
 | `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS`                 | Number of threads for each model operation                                                          |               `2`               | machine learning |
 | `MACHINE_LEARNING_WORKERS`<sup>\*2</sup>                  | Number of worker processes to spawn                                                                 |               `1`               | machine learning |
 | `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds                                                                     |               `2`               | machine learning |
-| `MACHINE_LEARNING_WORKER_TIMEOUT`                         | Maximum time (s) of unresponsiveness before a worker is killed                                      | `120` (`300` if using OpenVINO image) | machine learning |
+| `MACHINE_LEARNING_WORKER_TIMEOUT`                         | Maximum time (s) of unresponsiveness before a worker is killed                                      | `120` (`300` if using OpenVINO) | machine learning |
 | `MACHINE_LEARNING_PRELOAD__CLIP`                          | Name of a CLIP model to be preloaded and kept in cache                                              |                                 | machine learning |
 | `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION`            | Name of a facial recognition model to be preloaded and kept in cache                                |                                 | machine learning |
 | `MACHINE_LEARNING_ANN`                                    | Enable ARM-NN hardware acceleration if supported                                                    |             `True`              | machine learning |
 | `MACHINE_LEARNING_ANN_FP16_TURBO`                         | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) |             `False`             | machine learning |
 | `MACHINE_LEARNING_ANN_TUNING_LEVEL`                       | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive)                                        |               `2`               | machine learning |
 | `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup>               | Device IDs to use in multi-GPU environments                                                         |               `0`               | machine learning |
+| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION`     | Set the maximum number of faces that will be processed at once by the facial recognition model      |  None (`1` if using OpenVINO)   | machine learning |
 
 \*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.
 
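The new `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` variable caps how many cropped faces are sent to the recognition model per inference call. As a rough illustration (the numbers below are made up, not from the commit), a cap of 4 splits a 10-face image into calls of 4, 4 and 2 faces:

# Illustration only: chunk sizes for a hypothetical 10-face image with a cap of 4.
faces, cap = 10, 4
print([min(cap, faces - i) for i in range(0, faces, cap)])  # [4, 4, 2]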
Machine learning settings (app.config):

@@ -19,6 +19,10 @@ class PreloadModelData(BaseModel):
     facial_recognition: str | None = None
 
 
+class MaxBatchSize(BaseModel):
+    facial_recognition: int | None = None
+
+
 class Settings(BaseSettings):
     model_config = SettingsConfigDict(
         env_prefix="MACHINE_LEARNING_",
@@ -41,6 +45,7 @@ class Settings(BaseSettings):
     ann_fp16_turbo: bool = False
     ann_tuning_level: int = 2
     preload: PreloadModelData | None = None
+    max_batch_size: MaxBatchSize | None = None
 
     @property
     def device_id(self) -> str:
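For context, a minimal sketch of how the nested environment variable resolves to `settings.max_batch_size.facial_recognition`. The stripped-down classes below are stand-ins rather than the service's actual Settings, and the `env_nested_delimiter="__"` option is an assumption inferred from the existing `MACHINE_LEARNING_PRELOAD__*` variables:

import os

from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class MaxBatchSize(BaseModel):
    facial_recognition: int | None = None


class Settings(BaseSettings):
    # Assumed to mirror the real config: env prefix plus "__" for nested fields.
    model_config = SettingsConfigDict(env_prefix="MACHINE_LEARNING_", env_nested_delimiter="__")

    max_batch_size: MaxBatchSize | None = None


os.environ["MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION"] = "4"
print(Settings().max_batch_size)  # facial_recognition=4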
Facial recognition model (app.models.facial_recognition.recognition):

@@ -3,13 +3,14 @@ from typing import Any
 
 import numpy as np
 import onnx
+import onnxruntime as ort
 from insightface.model_zoo import ArcFaceONNX
 from insightface.utils.face_align import norm_crop
 from numpy.typing import NDArray
 from onnx.tools.update_model_dims import update_inputs_outputs_dims
 from PIL import Image
 
-from app.config import log
+from app.config import log, settings
 from app.models.base import InferenceModel
 from app.models.transforms import decode_cv2
 from app.schemas import FaceDetectionOutput, FacialRecognitionOutput, ModelFormat, ModelSession, ModelTask, ModelType
@@ -22,11 +23,12 @@ class FaceRecognizer(InferenceModel):
     def __init__(self, model_name: str, min_score: float = 0.7, **model_kwargs: Any) -> None:
         super().__init__(model_name, **model_kwargs)
         self.min_score = model_kwargs.pop("minScore", min_score)
-        self.batch = self.model_format == ModelFormat.ONNX
+        max_batch_size = settings.max_batch_size.facial_recognition if settings.max_batch_size else None
+        self.batch_size = max_batch_size if max_batch_size else self._batch_size_default
 
     def _load(self) -> ModelSession:
         session = self._make_session(self.model_path)
-        if self.batch and str(session.get_inputs()[0].shape[0]) != "batch":
+        if (not self.batch_size or self.batch_size > 1) and str(session.get_inputs()[0].shape[0]) != "batch":
             self._add_batch_axis(self.model_path)
             session = self._make_session(self.model_path)
         self.model = ArcFaceONNX(
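The constructor now resolves an effective batch size: an explicitly configured value wins, otherwise it falls back to `_batch_size_default` (added in a later hunk), which is `None` for plain ONNX without OpenVINO and `1` otherwise. A standalone restatement of that selection, for illustration only:

def resolve_batch_size(configured: int | None, is_onnx: bool, providers: list[str]) -> int | None:
    # Illustrative only, not part of the codebase: mirrors the selection logic above.
    if configured:
        return configured
    # None means "no cap": all cropped faces go to the model in a single call.
    return None if is_onnx and "OpenVINOExecutionProvider" not in providers else 1


assert resolve_batch_size(None, True, ["CPUExecutionProvider"]) is None
assert resolve_batch_size(None, True, ["OpenVINOExecutionProvider"]) == 1
assert resolve_batch_size(4, True, ["OpenVINOExecutionProvider"]) == 4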
@@ -42,18 +44,18 @@ class FaceRecognizer(InferenceModel):
             return []
         inputs = decode_cv2(inputs)
         cropped_faces = self._crop(inputs, faces)
-        embeddings = self._predict_batch(cropped_faces) if self.batch else self._predict_single(cropped_faces)
+        embeddings = self._predict_batch(cropped_faces)
         return self.postprocess(faces, embeddings)
 
     def _predict_batch(self, cropped_faces: list[NDArray[np.uint8]]) -> NDArray[np.float32]:
+        if not self.batch_size or len(cropped_faces) <= self.batch_size:
             embeddings: NDArray[np.float32] = self.model.get_feat(cropped_faces)
             return embeddings
 
-    def _predict_single(self, cropped_faces: list[NDArray[np.uint8]]) -> NDArray[np.float32]:
-        embeddings: list[NDArray[np.float32]] = []
-        for face in cropped_faces:
-            embeddings.append(self.model.get_feat(face))
-        return np.concatenate(embeddings, axis=0)
+        batch_embeddings: list[NDArray[np.float32]] = []
+        for i in range(0, len(cropped_faces), self.batch_size):
+            batch_embeddings.append(self.model.get_feat(cropped_faces[i : i + self.batch_size]))
+        return np.concatenate(batch_embeddings, axis=0)
 
     def postprocess(self, faces: FaceDetectionOutput, embeddings: NDArray[np.float32]) -> FacialRecognitionOutput:
         return [
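A generic sketch of the chunk-and-concatenate pattern that `_predict_batch` now uses, with a stand-in embedding function in place of `ArcFaceONNX.get_feat` (names and shapes below are illustrative, not from the codebase):

import numpy as np
from numpy.typing import NDArray


def fake_get_feat(faces: list[NDArray[np.uint8]]) -> NDArray[np.float32]:
    # Stand-in for ArcFaceONNX.get_feat: one 3-float "embedding" per face.
    return np.stack([f.mean(axis=(0, 1)) for f in faces]).astype(np.float32)


def embed_in_chunks(faces: list[NDArray[np.uint8]], batch_size: int | None) -> NDArray[np.float32]:
    # No cap (None) or a small enough batch: a single call over everything.
    if not batch_size or len(faces) <= batch_size:
        return fake_get_feat(faces)
    # Otherwise run the model in fixed-size chunks and stitch the results together.
    chunks = [fake_get_feat(faces[i : i + batch_size]) for i in range(0, len(faces), batch_size)]
    return np.concatenate(chunks, axis=0)


faces = [np.zeros((112, 112, 3), dtype=np.uint8) for _ in range(10)]
print(embed_in_chunks(faces, 4).shape)  # (10, 3)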
@@ -77,3 +79,8 @@ class FaceRecognizer(InferenceModel):
         output_dims = {proto.graph.output[0].name: ["batch"] + static_output_dims}
         updated_proto = update_inputs_outputs_dims(proto, input_dims, output_dims)
         onnx.save(updated_proto, model_path)
+
+    @property
+    def _batch_size_default(self) -> int | None:
+        providers = ort.get_available_providers()
+        return None if self.model_format == ModelFormat.ONNX and "OpenVINOExecutionProvider" not in providers else 1
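`_add_batch_axis` (context lines above) rewrites the ONNX graph so its leading dimension becomes a symbolic `batch` axis. A minimal standalone sketch of the same rewrite, assuming a hypothetical `model.onnx` with a single input and a single output whose non-leading dimensions are all fixed:

import onnx
from onnx.tools.update_model_dims import update_inputs_outputs_dims

# Load the model and replace the fixed leading dimension with a symbolic "batch" axis.
proto = onnx.load("model.onnx")  # hypothetical path
graph_input, graph_output = proto.graph.input[0], proto.graph.output[0]
input_dims = {graph_input.name: ["batch"] + [d.dim_value for d in graph_input.type.tensor_type.shape.dim[1:]]}
output_dims = {graph_output.name: ["batch"] + [d.dim_value for d in graph_output.type.tensor_type.shape.dim[1:]]}
onnx.save(update_inputs_outputs_dims(proto, input_dims, output_dims), "model.onnx")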
Tests (class TestFaceRecognition):

@@ -549,7 +549,7 @@ class TestFaceRecognition:
         face_recognizer = FaceRecognizer("buffalo_s", cache_dir=path)
         face_recognizer.load()
 
-        assert face_recognizer.batch is True
+        assert face_recognizer.batch_size is None
         update_dims.assert_called_once_with(proto, {"input.1": ["batch", 3, 224, 224]}, {"output.1": ["batch", 800]})
         onnx.save.assert_called_once_with(update_dims.return_value, face_recognizer.model_path)
 
@@ -572,7 +572,7 @@ class TestFaceRecognition:
         face_recognizer = FaceRecognizer("buffalo_s", cache_dir=path)
         face_recognizer.load()
 
-        assert face_recognizer.batch is True
+        assert face_recognizer.batch_size is None
         update_dims.assert_not_called()
         onnx.load.assert_not_called()
         onnx.save.assert_not_called()
@@ -596,7 +596,33 @@ class TestFaceRecognition:
         face_recognizer = FaceRecognizer("buffalo_s", model_format=ModelFormat.ARMNN, cache_dir=path)
         face_recognizer.load()
 
-        assert face_recognizer.batch is False
+        assert face_recognizer.batch_size == 1
         update_dims.assert_not_called()
         onnx.load.assert_not_called()
         onnx.save.assert_not_called()
+
+    def test_recognition_does_not_add_batch_axis_for_openvino(
+        self, ort_session: mock.Mock, path: mock.Mock, mocker: MockerFixture
+    ) -> None:
+        onnx = mocker.patch("app.models.facial_recognition.recognition.onnx", autospec=True)
+        update_dims = mocker.patch(
+            "app.models.facial_recognition.recognition.update_inputs_outputs_dims", autospec=True
+        )
+        mocker.patch("app.models.base.InferenceModel.download")
+        mocker.patch("app.models.facial_recognition.recognition.ArcFaceONNX")
+        path.return_value.__truediv__.return_value.__truediv__.return_value.suffix = ".onnx"
+
+        inputs = [SimpleNamespace(name="input.1", shape=("batch", 3, 224, 224))]
+        outputs = [SimpleNamespace(name="output.1", shape=("batch", 800))]
+        ort_session.return_value.get_inputs.return_value = inputs
+        ort_session.return_value.get_outputs.return_value = outputs
+
+        face_recognizer = FaceRecognizer(
+            "buffalo_s", model_format=ModelFormat.ARMNN, cache_dir=path, providers=["OpenVINOExecutionProvider"]
+        )
+        face_recognizer.load()
+
+        assert face_recognizer.batch_size == 1
+        update_dims.assert_not_called()
+        onnx.load.assert_not_called()
+        onnx.save.assert_not_called()