mirror of
				https://github.com/immich-app/immich.git
				synced 2025-10-31 02:27:08 -04:00 
			
		
		
		
	* feat: add OCR functionality and related configurations * chore: update labeler configuration for machine learning files * feat(i18n): enhance OCR model descriptions and add orientation classification and unwarping features * chore: update Dockerfile to include ccache for improved build performance * feat(ocr): enhance OCR model configuration with orientation classification and unwarping options, update PaddleOCR integration, and improve response structure * refactor(ocr): remove OCR_CLEANUP job from enum and type definitions * refactor(ocr): remove obsolete OCR entity and migration files, and update asset job status and schema to accommodate new OCR table structure * refactor(ocr): update OCR schema and response structure to use individual coordinates instead of bounding box, and adjust related service and repository files * feat: enhance OCR configuration and functionality - Updated OCR settings to include minimum detection box score, minimum detection score, and minimum recognition score. - Refactored PaddleOCRecognizer to utilize new scoring parameters. - Introduced new database tables for asset OCR data and search functionality. - Modified related services and repositories to support the new OCR features. - Updated translations for improved clarity in settings UI. * sql changes * use rapidocr * change dto * update web * update lock * update api * store positions as normalized floats * match column order in db * update admin ui settings descriptions fix max resolution key set min threshold to 0.1 fix bind * apply config correctly, adjust defaults * unnecessary model type * unnecessary sources * fix(ocr): switch RapidOCR lang type from LangDet to LangRec * fix(ocr): expose lang_type (LangRec.CH) and font_path on OcrOptions for RapidOCR * fix(ocr): make OCR text search case- and accent-insensitive using ILIKE + unaccent * fix(ocr): add OCR search fields * fix: Add OCR database migration and update ML prediction logic. 
* trigrams are already case insensitive * add tests * format * update migrations * wrong uuid function * linting * maybe fix medium tests * formatting * fix weblate check * openapi * sql * minor fixes * maybe fix medium tests part 2 * passing medium tests * format web * readd sql * format dart * disabled in e2e * chore: translation ordering --------- Co-authored-by: mertalev <101130780+mertalev@users.noreply.github.com> Co-authored-by: Alex Tran <alex.tran1502@gmail.com>
		
			
				
	
	
		
			118 lines
		
	
	
		
			4.8 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			118 lines
		
	
	
		
			4.8 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| from typing import Any
 | |
| 
 | |
| import cv2
 | |
| import numpy as np
 | |
| from numpy.typing import NDArray
 | |
| from PIL.Image import Image
 | |
| from rapidocr.ch_ppocr_rec import TextRecInput
 | |
| from rapidocr.ch_ppocr_rec import TextRecognizer as RapidTextRecognizer
 | |
| from rapidocr.inference_engine.base import FileInfo, InferSession
 | |
| from rapidocr.utils import DownloadFile, DownloadFileInput
 | |
| from rapidocr.utils.typings import EngineType, LangRec, OCRVersion, TaskType
 | |
| from rapidocr.utils.typings import ModelType as RapidModelType
 | |
| 
 | |
| from immich_ml.config import log, settings
 | |
| from immich_ml.models.base import InferenceModel
 | |
| from immich_ml.schemas import ModelFormat, ModelSession, ModelTask, ModelType
 | |
| from immich_ml.sessions.ort import OrtSession
 | |
| 
 | |
| from .schemas import OcrOptions, TextDetectionOutput, TextRecognitionOutput
 | |
| 
 | |
| 
 | |
class TextRecognizer(InferenceModel):
    """Text recognition stage of the OCR pipeline.

    Crops each detected text region out of the source image (rectifying it
    with a perspective transform), runs the RapidOCR PP-OCRv5 recognition
    model over the crops, and returns the recognized strings together with
    normalized box coordinates and confidence scores.
    """

    # Recognition consumes the output of the detection stage.
    depends = [(ModelType.DETECTION, ModelTask.OCR)]
    identity = (ModelType.RECOGNITION, ModelTask.OCR)

    def __init__(self, model_name: str, **model_kwargs: Any) -> None:
        # Minimum recognition confidence; results at or below this are dropped.
        self.min_score: float = model_kwargs.get("minScore", 0.9)
        # Canonical "no results" payload. NOTE(review): this single dict is
        # shared across calls — callers must treat it as read-only.
        self._empty: TextRecognitionOutput = {
            "box": np.empty(0, dtype=np.float32),
            "boxScore": np.empty(0, dtype=np.float32),
            "text": [],
            "textScore": np.empty(0, dtype=np.float32),
        }
        super().__init__(model_name, **model_kwargs, model_format=ModelFormat.ONNX)

    def _download(self) -> None:
        """Resolve the model URL via RapidOCR and download the ONNX weights to self.model_path."""
        model_info = InferSession.get_model_url(
            FileInfo(
                engine_type=EngineType.ONNXRUNTIME,
                ocr_version=OCRVersion.PPOCRV5,
                task_type=TaskType.REC,
                lang_type=LangRec.CH,
                # "mobile" in the model name selects the lighter variant; anything else gets the server model.
                model_type=RapidModelType.MOBILE if "mobile" in self.model_name else RapidModelType.SERVER,
            )
        )
        download_params = DownloadFileInput(
            file_url=model_info["model_dir"],
            sha256=model_info["SHA256"],
            save_path=self.model_path,
            logger=log,
        )
        DownloadFile.run(download_params)

    def _load(self) -> ModelSession:
        """Create the ONNX Runtime session and wrap it in RapidOCR's text recognizer."""
        # TODO: support other runtimes
        session = OrtSession(self.model_path)
        self.model = RapidTextRecognizer(
            OcrOptions(
                session=session.session,
                # Fall back to a batch size of 6 when no limit is configured.
                rec_batch_num=settings.max_batch_size.text_recognition if settings.max_batch_size is not None else 6,
                rec_img_shape=(3, 48, 320),
            )
        )
        return session

    def _predict(self, _: Image, texts: TextDetectionOutput) -> TextRecognitionOutput:
        """Recognize text in the regions produced by the detection stage.

        Args:
            _: unused (kept for the InferenceModel prediction interface).
            texts: detection output holding the image, the detected
                quadrilateral boxes, and the per-box detection scores.

        Returns:
            Recognized strings with flattened, normalized box coordinates and
            detection/recognition scores, filtered by ``self.min_score``.
        """
        boxes, img, box_scores = texts["boxes"], texts["image"], texts["scores"]
        if boxes.shape[0] == 0:
            return self._empty
        rec = self.model(TextRecInput(img=self.get_crop_img_list(img, boxes)))
        if rec.txts is None:
            return self._empty

        # Normalize box coordinates to [0, 1] relative to the image size.
        # NOTE: this mutates texts["boxes"] in place.
        height, width = img.shape[0:2]
        boxes[:, :, 0] /= width
        boxes[:, :, 1] /= height

        text_scores = np.array(rec.scores)
        keep = text_scores > self.min_score
        keep_list = keep.tolist()
        return {
            # Each kept box is flattened to 8 values (4 corners x (x, y)),
            # then all kept boxes are concatenated into one flat array.
            "box": boxes.reshape(-1, 8)[keep].reshape(-1),
            "text": [text for text, kept in zip(rec.txts, keep_list) if kept],
            "boxScore": box_scores[keep],
            "textScore": text_scores[keep],
        }

    def get_crop_img_list(self, img: NDArray[np.float32], boxes: NDArray[np.float32]) -> list[NDArray[np.float32]]:
        """Cut each quadrilateral text box out of ``img`` as a rectified crop.

        Each box is warped to an axis-aligned rectangle sized by its longest
        opposing edges; crops that come out much taller than wide are rotated
        90 degrees so the text reads horizontally.

        Args:
            img: source image as an HxWxC float array.
            boxes: (N, 4, 2) array of box corner coordinates in pixel space,
                ordered top-left, top-right, bottom-right, bottom-left.

        Returns:
            One rectified crop per input box.
        """
        # Target crop width/height: the longer of the two opposing edges.
        img_crop_width = np.maximum(
            np.linalg.norm(boxes[:, 1] - boxes[:, 0], axis=1), np.linalg.norm(boxes[:, 2] - boxes[:, 3], axis=1)
        ).astype(np.int32)
        img_crop_height = np.maximum(
            np.linalg.norm(boxes[:, 0] - boxes[:, 3], axis=1), np.linalg.norm(boxes[:, 1] - boxes[:, 2], axis=1)
        ).astype(np.int32)
        # Destination corners per crop: (0,0), (w,0), (w,h), (0,h).
        # (Renamed from pts_std: the original reused the name as the loop
        # variable below, shadowing this array mid-iteration.)
        dst_corners = np.zeros((img_crop_width.shape[0], 4, 2), dtype=np.float32)
        dst_corners[:, 1:3, 0] = img_crop_width[:, None]
        dst_corners[:, 2:4, 1] = img_crop_height[:, None]

        img_crop_sizes = np.stack([img_crop_width, img_crop_height], axis=1).tolist()
        imgs: list[NDArray[np.float32]] = []
        for box, corners, dst_size in zip(list(boxes), list(dst_corners), img_crop_sizes):
            M = cv2.getPerspectiveTransform(box, corners)
            dst_img: NDArray[np.float32] = cv2.warpPerspective(
                img,
                M,
                # dsize is documented as a (width, height) tuple.
                tuple(dst_size),
                borderMode=cv2.BORDER_REPLICATE,
                flags=cv2.INTER_CUBIC,
            )  # type: ignore
            # Rotate tall, narrow crops (aspect ratio >= 1.5) to horizontal.
            dst_height, dst_width = dst_img.shape[0:2]
            if dst_height * 1.0 / dst_width >= 1.5:
                dst_img = np.rot90(dst_img)
            imgs.append(dst_img)
        return imgs

    def configure(self, **kwargs: Any) -> None:
        """Update runtime-tunable settings; currently only the minScore threshold."""
        self.min_score = kwargs.get("minScore", self.min_score)
 |