	feat: Allow multiple ML models to be preloaded (#15418)
This commit is contained in:
parent 345791c0e6
commit 1d0d4fc281
@@ -159,10 +159,10 @@ Redis (Sentinel) URL example JSON before encoding:
 | `MACHINE_LEARNING_WORKERS`<sup>\*2</sup>                    | Number of worker processes to spawn                                                                 |               `1`               | machine learning |
 | `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup>   | HTTP Keep-alive time in seconds                                                                     |               `2`               | machine learning |
 | `MACHINE_LEARNING_WORKER_TIMEOUT`                           | Maximum time (s) of unresponsiveness before a worker is killed                                      | `120` (`300` if using OpenVINO) | machine learning |
-| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL`                   | Name of the textual CLIP model to be preloaded and kept in cache                                    |                                 | machine learning |
-| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL`                    | Name of the visual CLIP model to be preloaded and kept in cache                                     |                                 | machine learning |
-| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Name of the recognition portion of the facial recognition model to be preloaded and kept in cache   |                                 | machine learning |
-| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION`   | Name of the detection portion of the facial recognition model to be preloaded and kept in cache     |                                 | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL`                   | Comma-separated list of (textual) CLIP model(s) to preload and cache                                |                                 | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL`                    | Comma-separated list of (visual) CLIP model(s) to preload and cache                                 |                                 | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Comma-separated list of (recognition) facial recognition model(s) to preload and cache              |                                 | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION`   | Comma-separated list of (detection) facial recognition model(s) to preload and cache                |                                 | machine learning |
 | `MACHINE_LEARNING_ANN`                                      | Enable ARM-NN hardware acceleration if supported                                                    |             `True`              | machine learning |
 | `MACHINE_LEARNING_ANN_FP16_TURBO`                           | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) |             `False`             | machine learning |
 | `MACHINE_LEARNING_ANN_TUNING_LEVEL`                         | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive)                                        |               `2`               | machine learning |
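For illustration, the new comma-separated form could be set like this in the machine-learning service's environment. The model names below are placeholders chosen for this sketch, not values mandated by this commit; the `__` separators in the variable names mirror the nested `preload.clip.textual` / `preload.facial_recognition.detection` fields read by `preload_models` in the code change below.

```
# Hypothetical .env sketch: preload two textual CLIP models and one face model pair.
MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL=ViT-B-32__openai,XLM-Roberta-Large-Vit-B-16Plus
MACHINE_LEARNING_PRELOAD__CLIP__VISUAL=ViT-B-32__openai
MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION=buffalo_l
MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION=buffalo_l
```

Each value may hold a single name or several names separated by commas; every listed model is loaded at startup and kept in the cache.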
@@ -77,29 +77,31 @@ async def lifespan(_: FastAPI) -> AsyncGenerator[None, None]:
 async def preload_models(preload: PreloadModelData) -> None:
     log.info(f"Preloading models: clip:{preload.clip} facial_recognition:{preload.facial_recognition}")
 
+    async def load_models(model_string: str, model_type: ModelType, model_task: ModelTask) -> None:
+        for model_name in model_string.split(","):
+            model_name = model_name.strip()
+            model = await model_cache.get(model_name, model_type, model_task)
+            await load(model)
+
     if preload.clip.textual is not None:
-        model = await model_cache.get(preload.clip.textual, ModelType.TEXTUAL, ModelTask.SEARCH)
-        await load(model)
+        await load_models(preload.clip.textual, ModelType.TEXTUAL, ModelTask.SEARCH)
 
     if preload.clip.visual is not None:
-        model = await model_cache.get(preload.clip.visual, ModelType.VISUAL, ModelTask.SEARCH)
-        await load(model)
+        await load_models(preload.clip.visual, ModelType.VISUAL, ModelTask.SEARCH)
 
     if preload.facial_recognition.detection is not None:
-        model = await model_cache.get(
+        await load_models(
             preload.facial_recognition.detection,
             ModelType.DETECTION,
             ModelTask.FACIAL_RECOGNITION,
         )
-        await load(model)
 
     if preload.facial_recognition.recognition is not None:
-        model = await model_cache.get(
+        await load_models(
             preload.facial_recognition.recognition,
             ModelType.RECOGNITION,
             ModelTask.FACIAL_RECOGNITION,
         )
-        await load(model)
 
     if preload.clip_fallback is not None:
         log.warning(
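The behavioral core of the change is the new inner `load_models` helper: a preload setting may now hold several model names separated by commas, and each entry is stripped of surrounding whitespace and loaded independently through the model cache. Below is a minimal standalone sketch of that parsing, using a stub in place of immich's real `model_cache`; the `FakeCache` class and `preload_from_string` function are illustrative only, not part of the commit.

```python
import asyncio


class FakeCache:
    """Stand-in for the real model cache; get() would normally fetch and load a model."""

    async def get(self, name: str, model_type: str, model_task: str) -> str:
        return f"{model_task}/{model_type}/{name}"


async def preload_from_string(
    cache: FakeCache, model_string: str, model_type: str, model_task: str
) -> list[str]:
    # Mirrors the new behavior: split on commas, trim whitespace, load each entry.
    loaded = []
    for model_name in model_string.split(","):
        model = await cache.get(model_name.strip(), model_type, model_task)
        loaded.append(model)
    return loaded


if __name__ == "__main__":
    names = "ViT-B-32__openai, XLM-Roberta-Large-Vit-B-16Plus"  # example value only
    print(asyncio.run(preload_from_string(FakeCache(), names, "textual", "search")))
```

Note that, as written in the diff, an empty entry (for example from a trailing comma) would still be passed to the cache after stripping, so values should be written without stray commas.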