Mirror of https://github.com/immich-app/immich.git (synced 2025-10-30 18:22:37 -04:00)
* feat: add OCR functionality and related configurations
* chore: update labeler configuration for machine learning files
* feat(i18n): enhance OCR model descriptions and add orientation classification and unwarping features
* chore: update Dockerfile to include ccache for improved build performance
* feat(ocr): enhance OCR model configuration with orientation classification and unwarping options, update PaddleOCR integration, and improve response structure
* refactor(ocr): remove OCR_CLEANUP job from enum and type definitions
* refactor(ocr): remove obsolete OCR entity and migration files, and update asset job status and schema to accommodate new OCR table structure
* refactor(ocr): update OCR schema and response structure to use individual coordinates instead of bounding box, and adjust related service and repository files
* feat: enhance OCR configuration and functionality
  - Updated OCR settings to include minimum detection box score, minimum detection score, and minimum recognition score.
  - Refactored PaddleOCRecognizer to utilize new scoring parameters.
  - Introduced new database tables for asset OCR data and search functionality.
  - Modified related services and repositories to support the new OCR features.
  - Updated translations for improved clarity in settings UI.
* sql changes
* use rapidocr
* change dto
* update web
* update lock
* update api
* store positions as normalized floats
* match column order in db
* update admin ui settings descriptions
  - fix max resolution key
  - set min threshold to 0.1
  - fix bind
* apply config correctly, adjust defaults
* unnecessary model type
* unnecessary sources
* fix(ocr): switch RapidOCR lang type from LangDet to LangRec
* fix(ocr): expose lang_type (LangRec.CH) and font_path on OcrOptions for RapidOCR
* fix(ocr): make OCR text search case- and accent-insensitive using ILIKE + unaccent
* fix(ocr): add OCR search fields
* fix: Add OCR database migration and update ML prediction logic.
* trigrams are already case insensitive
* add tests
* format
* update migrations
* wrong uuid function
* linting
* maybe fix medium tests
* formatting
* fix weblate check
* openapi
* sql
* minor fixes
* maybe fix medium tests part 2
* passing medium tests
* format web
* readd sql
* format dart
* disabled in e2e
* chore: translation ordering

---------

Co-authored-by: mertalev <101130780+mertalev@users.noreply.github.com>
Co-authored-by: Alex Tran <alex.tran1502@gmail.com>
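
One of the items above makes OCR text search case- and accent-insensitive using ILIKE + unaccent. Below is a minimal sketch of that kind of query; the real implementation lives in the server's TypeScript migrations and repositories, and the table and column names here (ocr_text, asset_id, text) are made up for illustration.

import psycopg


def search_ocr_assets(conn: psycopg.Connection, query: str) -> list[str]:
    """Find asset ids whose OCR text matches, ignoring case and accents."""
    # unaccent() (from the Postgres "unaccent" extension) strips diacritics
    # from both the stored text and the pattern; ILIKE folds case, so a
    # search for "cafe" matches "CAFÉ".
    sql = """
        SELECT asset_id
        FROM ocr_text
        WHERE unaccent(text) ILIKE unaccent(%s)
    """
    with conn.cursor() as cur:
        cur.execute(sql, (f"%{query}%",))
        return [row[0] for row in cur.fetchall()]

A trigram (pg_trgm) index over the unaccented column keeps the leading-wildcard ILIKE usable at scale, which lines up with the note above that trigrams are already case insensitive; indexing an unaccent expression also requires an immutable wrapper function around it.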
		
			
				
	
	
		
121 lines · 4.7 KiB · Python
import json
from abc import abstractmethod
from functools import cached_property
from pathlib import Path
from typing import Any

import numpy as np
from numpy.typing import NDArray
from tokenizers import Encoding, Tokenizer

from immich_ml.config import log
from immich_ml.models.base import InferenceModel
from immich_ml.models.constants import WEBLATE_TO_FLORES200
from immich_ml.models.transforms import clean_text, serialize_np_array
from immich_ml.schemas import ModelSession, ModelTask, ModelType


class BaseCLIPTextualEncoder(InferenceModel):
    depends = []
    identity = (ModelType.TEXTUAL, ModelTask.SEARCH)

    def _predict(self, inputs: str, language: str | None = None) -> str:
        tokens = self.tokenize(inputs, language=language)
        # The session returns a batch of embeddings; take the first (and only) one.
        res: NDArray[np.float32] = self.session.run(None, tokens)[0][0]
        return serialize_np_array(res)

    def _load(self) -> ModelSession:
        session = super()._load()
        log.debug(f"Loading tokenizer for CLIP model '{self.model_name}'")
        self.tokenizer = self._load_tokenizer()
        tokenizer_kwargs: dict[str, Any] | None = self.text_cfg.get("tokenizer_kwargs")
        self.canonicalize = tokenizer_kwargs is not None and tokenizer_kwargs.get("clean") == "canonicalize"
        self.is_nllb = self.model_name.startswith("nllb")
        log.debug(f"Loaded tokenizer for CLIP model '{self.model_name}'")

        return session

    @abstractmethod
    def _load_tokenizer(self) -> Tokenizer:
        pass

    @abstractmethod
    def tokenize(self, text: str, language: str | None = None) -> dict[str, NDArray[np.int32]]:
        pass

    @property
    def model_cfg_path(self) -> Path:
        return self.cache_dir / "config.json"

    @property
    def tokenizer_file_path(self) -> Path:
        return self.model_dir / "tokenizer.json"

    @property
    def tokenizer_cfg_path(self) -> Path:
        return self.model_dir / "tokenizer_config.json"

    @cached_property
    def model_cfg(self) -> dict[str, Any]:
        log.debug(f"Loading model config for CLIP model '{self.model_name}'")
        model_cfg: dict[str, Any] = json.load(self.model_cfg_path.open())
        log.debug(f"Loaded model config for CLIP model '{self.model_name}'")
        return model_cfg

    @property
    def text_cfg(self) -> dict[str, Any]:
        text_cfg: dict[str, Any] = self.model_cfg["text_cfg"]
        return text_cfg

    @cached_property
    def tokenizer_file(self) -> dict[str, Any]:
        log.debug(f"Loading tokenizer file for CLIP model '{self.model_name}'")
        tokenizer_file: dict[str, Any] = json.load(self.tokenizer_file_path.open())
        log.debug(f"Loaded tokenizer file for CLIP model '{self.model_name}'")
        return tokenizer_file

    @cached_property
    def tokenizer_cfg(self) -> dict[str, Any]:
        log.debug(f"Loading tokenizer config for CLIP model '{self.model_name}'")
        tokenizer_cfg: dict[str, Any] = json.load(self.tokenizer_cfg_path.open())
        log.debug(f"Loaded tokenizer config for CLIP model '{self.model_name}'")
        return tokenizer_cfg


class OpenClipTextualEncoder(BaseCLIPTextualEncoder):
    def _load_tokenizer(self) -> Tokenizer:
        context_length: int = self.text_cfg.get("context_length", 77)
        pad_token: str = self.tokenizer_cfg["pad_token"]

        tokenizer: Tokenizer = Tokenizer.from_file(self.tokenizer_file_path.as_posix())

        # Pad and truncate every query to the model's fixed context length.
        pad_id: int = tokenizer.token_to_id(pad_token)
        tokenizer.enable_padding(length=context_length, pad_token=pad_token, pad_id=pad_id)
        tokenizer.enable_truncation(max_length=context_length)

        return tokenizer

    def tokenize(self, text: str, language: str | None = None) -> dict[str, NDArray[np.int32]]:
        text = clean_text(text, canonicalize=self.canonicalize)
        if self.is_nllb and language is not None:
            # NLLB models expect a FLORES-200 language token prefix. Try the full
            # Weblate tag first (e.g. "pt-BR"), then the bare language code ("pt"),
            # and fall back to English if neither is mapped.
            flores_code = WEBLATE_TO_FLORES200.get(language)
            if flores_code is None:
                no_country = language.split("-")[0]
                flores_code = WEBLATE_TO_FLORES200.get(no_country)
                if flores_code is None:
                    log.warning(f"Language '{language}' not found, defaulting to 'en'")
                    flores_code = "eng_Latn"
            text = f"{flores_code}{text}"
        tokens: Encoding = self.tokenizer.encode(text)
        return {"text": np.array([tokens.ids], dtype=np.int32)}


class MClipTextualEncoder(OpenClipTextualEncoder):
    def tokenize(self, text: str, language: str | None = None) -> dict[str, NDArray[np.int32]]:
        text = clean_text(text, canonicalize=self.canonicalize)
        tokens: Encoding = self.tokenizer.encode(text)
        # M-CLIP's text encoder takes HuggingFace-style inputs rather than a
        # single "text" tensor.
        return {
            "input_ids": np.array([tokens.ids], dtype=np.int32),
            "attention_mask": np.array([tokens.attention_mask], dtype=np.int32),
        }
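
For context, a hedged sketch of how one of these encoders is driven end to end. It assumes the InferenceModel base class exposes load()/predict() wrappers around the _load()/_predict() hooks above; the import path and model name are placeholders.

# Hypothetical usage; import path and model name are assumptions.
from immich_ml.models.clip.textual import OpenClipTextualEncoder

encoder = OpenClipTextualEncoder("ViT-B-32__openai")
encoder.load()  # assumed wrapper around _load(): ONNX session + tokenizer

# tokenize() cleans the query and, for "nllb"-prefixed models, prepends a
# FLORES-200 language token resolved from the Weblate tag ("pt-BR", then
# "pt", then the "eng_Latn" fallback). The result is run through the ONNX
# session and serialized with serialize_np_array().
embedding = encoder.predict("golden retriever on a beach", language="pt-BR")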