Mirror of https://github.com/immich-app/immich.git, synced 2025-11-04 03:39:37 -05:00
* consolidated endpoints, added live configuration
* added ml settings to server
* added settings dashboard
* updated deps, fixed typos
* simplified modelconfig, updated tests
* added ml setting accordion for admin page, updated tests
* merge `clipText` and `clipVision`
* added face distance setting, clarified setting
* add clip mode in request, dropdown for face models
* polished ml settings, updated descriptions
* update clip field on error
* removed unused import
* add description for image classification threshold
* pin safetensors for arm wheel, updated poetry lock
* moved dto
* set model type only in ml repository
* revert form-data package install, use fetch instead of axios
* added slotted description with link, updated facial recognition description, clarified effect of disabling tasks
* validation before model load
* removed unnecessary getConfig call
* added migration
* updated api

---------

Co-authored-by: Alex Tran <alex.tran1502@gmail.com>
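For a rough sense of what the consolidated, live-reloadable ML configuration described above could look like, here is a minimal Pydantic sketch. All class names, field names, and default values (`MachineLearningSettings`, `min_face_distance`, `min_score`, the model names) are illustrative assumptions, not the actual Immich server configuration introduced by this change.

# Illustrative sketch only: names and defaults below are assumptions,
# not the real Immich server-side ML settings.
from pydantic import BaseModel, Field


class ClipSettings(BaseModel):
    enabled: bool = True
    model_name: str = "ViT-B-32::openai"  # single model name now that clipText/clipVision are merged


class FacialRecognitionSettings(BaseModel):
    enabled: bool = True
    model_name: str = "buffalo_l"      # face model chosen from a dropdown in the admin UI
    min_face_distance: float = 0.6     # "face distance" threshold mentioned in the change list


class ImageClassificationSettings(BaseModel):
    enabled: bool = True
    min_score: float = 0.9             # image classification threshold mentioned in the change list


class MachineLearningSettings(BaseModel):
    # Disabling a task here would stop the server from queueing that job type.
    clip: ClipSettings = Field(default_factory=ClipSettings)
    facial_recognition: FacialRecognitionSettings = Field(default_factory=FacialRecognitionSettings)
    classification: ImageClassificationSettings = Field(default_factory=ImageClassificationSettings)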
		
			
				
	
	
		
34 lines · 602 B · Python
from enum import StrEnum

from pydantic import BaseModel


def to_lower_camel(string: str) -> str:
    tokens = [token.capitalize() if i > 0 else token for i, token in enumerate(string.split("_"))]
    return "".join(tokens)


class TextModelRequest(BaseModel):
    text: str


class TextResponse(BaseModel):
    __root__: str


class MessageResponse(BaseModel):
    message: str


class BoundingBox(BaseModel):
    x1: int
    y1: int
    x2: int
    y2: int


class ModelType(StrEnum):
    IMAGE_CLASSIFICATION = "image-classification"
    CLIP = "clip"
    FACIAL_RECOGNITION = "facial-recognition"
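As a hedged usage sketch (not part of the mirrored file), `to_lower_camel` is the kind of helper typically wired into a Pydantic v1 `Config.alias_generator` so that snake_case fields serialize as lowerCamelCase JSON; the import path and the `FaceResponse` model below are assumptions for illustration.

# Hypothetical usage of the schemas above; the module path and FaceResponse
# are assumptions for illustration, not part of the mirrored file.
from pydantic import BaseModel

from app.schemas import BoundingBox, MessageResponse, TextResponse, to_lower_camel  # assumed import path


class FaceResponse(BaseModel):
    bounding_box: BoundingBox
    image_width: int
    image_height: int

    class Config:
        alias_generator = to_lower_camel       # bounding_box -> boundingBox, image_width -> imageWidth
        allow_population_by_field_name = True  # still allow constructing with snake_case names


face = FaceResponse(
    bounding_box=BoundingBox(x1=10, y1=20, x2=110, y2=140),
    image_width=640,
    image_height=480,
)
print(face.json(by_alias=True))
# {"boundingBox": {"x1": 10, "y1": 20, "x2": 110, "y2": 140}, "imageWidth": 640, "imageHeight": 480}

print(MessageResponse(message="healthy").json())  # {"message": "healthy"}
print(TextResponse(__root__="hello").json())      # "hello"

Keeping alias generation in one helper lets the JSON contract stay camelCase for API consumers while the Python code stays snake_case.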