export cli

Commit: c57c562166
Parent: ce2a41826e
machine-learning/.gitignore (vendored; 10 additions)
@@ -1,5 +1,15 @@
 *.zip
 *.onnx
+*.rknn
+*.npy
+*_attr__value
+tokenizer.json
+tokenizer_config.json
+special_tokens_map.json
+preprocess_cfg.json
+config.json
+merges.txt
+vocab.json
 upload/
 venv/
 __pycache__/
@@ -33,7 +33,6 @@ class InferenceModel(ABC):
         self.model_name = clean_name(model_name)
         self.cache_dir = Path(cache_dir) if cache_dir is not None else self._cache_dir_default
         self.model_format = model_format if model_format is not None else self._model_format_default
-        self.model_path_prefix = rknn.model_prefix if self.model_format == ModelFormat.RKNN else None
         if session is not None:
             self.session = session

@@ -121,15 +120,19 @@ class InferenceModel(ABC):
             raise ValueError(f"Unsupported model file type: {model_path.suffix}")
         return session

+    def model_path_for_format(self, model_format: ModelFormat) -> Path:
+        model_path_prefix = rknn.model_prefix if model_format == ModelFormat.RKNN else None
+        if model_path_prefix:
+            return self.model_dir / model_path_prefix / f"model.{model_format}"
+        return self.model_dir / f"model.{model_format}"
+
    @property
    def model_dir(self) -> Path:
        return self.cache_dir / self.model_type.value

    @property
    def model_path(self) -> Path:
-        if self.model_path_prefix:
-            return self.model_dir / self.model_path_prefix / f"model.{self.model_format}"
-        return self.model_dir / f"model.{self.model_format}"
+        return self.model_path_for_format(self.model_format)

    @property
    def model_task(self) -> ModelTask:
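For context, a minimal sketch of how the refactored path helpers resolve on disk. The cache layout and the RKNN prefix value below are assumptions for illustration; the real prefix comes from the rknn session module, not from this hunk.

# Hypothetical illustration only; cache layout and prefix are assumed.
from pathlib import Path

cache_dir = Path("/cache/clip/ViT-B-32__openai")       # assumed cache_dir for a CLIP model
model_dir = cache_dir / "textual"                      # model_dir = cache_dir / model_type.value

onnx_path = model_dir / "model.onnx"                   # model_path_for_format(ModelFormat.ONNX): no prefix
rknn_path = model_dir / "rknpu/rk3588" / "model.rknn"  # ModelFormat.RKNN with an assumed prefix

print(onnx_path)  # /cache/clip/ViT-B-32__openai/textual/model.onnx
print(rknn_path)  # /cache/clip/ViT-B-32__openai/textual/rknpu/rk3588/model.rknn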
@@ -65,6 +65,9 @@ _INSIGHTFACE_MODELS = {

 SUPPORTED_PROVIDERS = ["CUDAExecutionProvider", "OpenVINOExecutionProvider", "CPUExecutionProvider"]

+RKNN_SUPPORTED_SOCS = ["rk3566", "rk3588"]
+RKNN_COREMASK_SUPPORTED_SOCS = ["rk3576", "rk3588"]
+

 def get_model_source(model_name: str) -> ModelSource | None:
     cleaned_name = clean_name(model_name)
@@ -31,7 +31,7 @@ class FaceRecognizer(InferenceModel):
             self._add_batch_axis(self.model_path)
         session = self._make_session(self.model_path)
         self.model = ArcFaceONNX(
-            self.model_path.with_suffix(".onnx").as_posix(),
+            self.model_path_for_format(ModelFormat.ONNX).as_posix(),
             session=session,
         )
         return session
@@ -9,16 +9,14 @@ import numpy as np
 from numpy.typing import NDArray

 from app.config import log
-
-supported_socs = ["rk3566", "rk3588"]
-coremask_supported_socs = ["rk3576", "rk3588"]
+from app.models.constants import RKNN_COREMASK_SUPPORTED_SOCS, RKNN_SUPPORTED_SOCS


 def get_soc(device_tree_path: Path | str) -> str | None:
     try:
         with Path(device_tree_path).open() as f:
             device_compatible_str = f.read()
-        for soc in supported_socs:
+        for soc in RKNN_SUPPORTED_SOCS:
             if soc in device_compatible_str:
                 return soc
         log.warning("Device is not supported for RKNN")

@@ -46,7 +44,7 @@ def init_rknn(model_path: str) -> "RKNNLite":
     if ret != 0:
         raise RuntimeError("Load RKNN rknnModel failed")

-    if soc_name in coremask_supported_socs:
+    if soc_name in RKNN_COREMASK_SUPPORTED_SOCS:
         ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_AUTO)
     else:
         ret = rknn_lite.init_runtime()  # Please do not set this parameter on other platforms.
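A small, hedged usage sketch for the SoC detection above; both the module path and the /proc/device-tree/compatible location are assumptions for illustration, not something this diff states.

# Hypothetical usage sketch (not part of the diff).
from app.sessions.rknn import get_soc  # module path assumed

soc = get_soc("/proc/device-tree/compatible")  # typical location on Rockchip boards (assumed)
if soc is not None:
    print(f"Detected supported Rockchip SoC: {soc}")  # e.g. "rk3588"
else:
    print("RKNN is not supported on this device")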
machine-learning/export/.python-version (new file, 1 line)
@@ -0,0 +1 @@
3.12
machine-learning/export/immich_model_exporter/export.py (new file, 96 lines)
@@ -0,0 +1,96 @@
from pathlib import Path

import typer
from exporters.constants import SOURCE_TO_METADATA, ModelSource
from exporters.onnx import export as onnx_export
from exporters.rknn import export as rknn_export
from typing_extensions import Annotated

app = typer.Typer(pretty_exceptions_show_locals=False)


def generate_readme(model_name: str, model_source: ModelSource) -> str:
    (name, link, type) = SOURCE_TO_METADATA[model_source]
    match model_source:
        case ModelSource.MCLIP:
            tags = ["immich", "clip", "multilingual"]
        case ModelSource.OPENCLIP:
            tags = ["immich", "clip"]
            lowered = model_name.lower()
            if "xlm" in lowered or "nllb" in lowered:
                tags.append("multilingual")
        case ModelSource.INSIGHTFACE:
            tags = ["immich", "facial-recognition"]
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    return f"""---
tags:
{" - " + "\n - ".join(tags)}
---
# Model Description

This repo contains ONNX exports for the associated {type} model by {name}. See the [{name}]({link}) repo for more info.

This repo is specifically intended for use with [Immich](https://immich.app/), a self-hosted photo library.
"""


@app.command()
def main(
    model_name: str,
    model_source: ModelSource,
    output_dir: Path = Path("./models"),
    no_cache: bool = False,
    hf_organization: str = "immich-app",
    hf_auth_token: Annotated[str | None, typer.Option(envvar="HF_AUTH_TOKEN")] = None,
):
    hf_model_name = model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
    hf_model_name = hf_model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
    output_dir = output_dir / hf_model_name
    match model_source:
        case ModelSource.MCLIP | ModelSource.OPENCLIP:
            output_dir.mkdir(parents=True, exist_ok=True)
            onnx_export(model_name, model_source, output_dir, no_cache=no_cache)
        case ModelSource.INSIGHTFACE:
            from huggingface_hub import snapshot_download

            # TODO: start from insightface dump instead of downloading from HF
            snapshot_download(f"immich-app/{hf_model_name}", local_dir=output_dir)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    try:
        rknn_export(output_dir, no_cache=no_cache)
    except Exception as e:
        print(f"Failed to export model {model_name} to rknn: {e}")
        (output_dir / "rknpu").unlink(missing_ok=True)

    readme_path = output_dir / "README.md"
    if no_cache or not readme_path.exists():
        with open(readme_path, "w") as f:
            f.write(generate_readme(model_name, model_source))

    if hf_auth_token is not None:
        from huggingface_hub import create_repo, upload_folder

        repo_id = f"{hf_organization}/{hf_model_name}"
        create_repo(repo_id, exist_ok=True, token=hf_auth_token)

        # glob to delete old UUID blobs when reuploading models
        uuid_char = "[a-fA-F0-9]"
        uuid_glob = (
            uuid_char * 8 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 12
        )
        upload_folder(
            repo_id=repo_id,
            folder_path=output_dir,
            # remote repo files to be deleted before uploading
            # deletion is in the same commit as the upload, so it's atomic
            delete_patterns=[f"**/{uuid_glob}"],
            token=hf_auth_token,
        )


if __name__ == "__main__":
    typer.run(main)
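A hedged sketch of driving the new Typer CLI. The invocation mirrors how run.py, added later in this commit, shells out to export.py; the option spellings follow Typer's usual conventions and are assumptions here, not confirmed by the diff.

# Hypothetical invocation sketch, modeled on run.py.
import subprocess

# Positional arguments are (model_name, model_source).
subprocess.check_call(["python", "export.py", "ViT-B-32__openai", "openclip"])

# With explicit options (flag spellings assumed from Typer's defaults):
subprocess.check_call(
    ["python", "export.py", "ViT-B-32__openai", "openclip", "--output-dir", "./models", "--no-cache"]
)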
New file (23 lines):
@@ -0,0 +1,23 @@
from enum import StrEnum
from typing import NamedTuple


class ModelSource(StrEnum):
    INSIGHTFACE = "insightface"
    MCLIP = "mclip"
    OPENCLIP = "openclip"


class SourceMetadata(NamedTuple):
    name: str
    link: str
    type: str


SOURCE_TO_METADATA = {
    ModelSource.MCLIP: SourceMetadata("M-CLIP", "https://huggingface.co/M-CLIP", "CLIP"),
    ModelSource.OPENCLIP: SourceMetadata("OpenCLIP", "https://github.com/mlfoundations/open_clip", "CLIP"),
    ModelSource.INSIGHTFACE: SourceMetadata(
        "InsightFace", "https://github.com/deepinsight/insightface/tree/master", "facial recognition"
    ),
}
New file (20 lines):
@@ -0,0 +1,20 @@
from pathlib import Path

from ..constants import ModelSource
from .models import mclip, openclip


def export(
    model_name: str, model_source: ModelSource, output_dir: Path, opset_version: int = 19, no_cache: bool = False
) -> None:
    visual_dir = output_dir / "visual"
    textual_dir = output_dir / "textual"
    match model_source:
        case ModelSource.MCLIP:
            mclip.to_onnx(model_name, opset_version, visual_dir, textual_dir, no_cache=no_cache)
        case ModelSource.OPENCLIP:
            name, _, pretrained = model_name.partition("__")
            config = openclip.OpenCLIPModelConfig(name, pretrained)
            openclip.to_onnx(config, opset_version, visual_dir, textual_dir, no_cache=no_cache)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")
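A brief, hedged example of calling this dispatcher directly rather than through the CLI; the import paths mirror those used in export.py, and the output directory is a placeholder.

# Hypothetical direct call (export.py normally does this for you).
from pathlib import Path

from exporters.constants import ModelSource
from exporters.onnx import export

# OpenCLIP names use the "<model>__<pretrained>" convention seen in run.py.
export("ViT-B-32__openai", ModelSource.OPENCLIP, Path("models/ViT-B-32__openai"), no_cache=False)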
@@ -1,12 +1,6 @@
-import os
-import tempfile
 import warnings
 from pathlib import Path

-import torch
-from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
-from transformers import AutoTokenizer
-
 from .openclip import OpenCLIPModelConfig
 from .openclip import to_onnx as openclip_to_onnx
 from .util import get_model_path

@@ -21,25 +15,38 @@ _MCLIP_TO_OPENCLIP = {

 def to_onnx(
     model_name: str,
+    opset_version: int,
     output_dir_visual: Path | str,
     output_dir_textual: Path | str,
+    no_cache: bool = False,
 ) -> tuple[Path, Path]:
     textual_path = get_model_path(output_dir_textual)
-    with tempfile.TemporaryDirectory() as tmpdir:
-        model = MultilingualCLIP.from_pretrained(model_name, cache_dir=os.environ.get("CACHE_DIR", tmpdir))
+    if no_cache or not textual_path.exists():
+        import torch
+        from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
+        from transformers import AutoTokenizer
+
+        torch.backends.mha.set_fastpath_enabled(False)
+
+        model = MultilingualCLIP.from_pretrained(model_name)
         AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)
+
         model.eval()
         for param in model.parameters():
             param.requires_grad_(False)

-        export_text_encoder(model, textual_path)
-    visual_path, _ = openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
+        _export_text_encoder(model, textual_path, opset_version)
+    else:
+        print(f"Model {textual_path} already exists, skipping")
+
+    visual_path, _ = openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual, no_cache=no_cache)
     assert visual_path is not None, "Visual model export failed"
     return visual_path, textual_path


-def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None:
+def _export_text_encoder(model: "MultilingualCLIP", output_path: Path | str, opset_version: int) -> None:
+    import torch
+    from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
+
     output_path = Path(output_path)

     def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:

@@ -61,7 +68,7 @@ def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None
         output_path.as_posix(),
         input_names=["input_ids", "attention_mask"],
         output_names=["embedding"],
-        opset_version=17,
+        opset_version=opset_version,
         # dynamic_axes={
         #     "input_ids": {0: "batch_size", 1: "sequence_length"},
         #     "attention_mask": {0: "batch_size", 1: "sequence_length"},
New file (149 lines):
@@ -0,0 +1,149 @@
import warnings
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Any

from .util import get_model_path, save_config


@dataclass
class OpenCLIPModelConfig:
    name: str
    pretrained: str

    @cached_property
    def model_config(self) -> dict[str, Any]:
        import open_clip

        config = open_clip.get_model_config(self.name)
        if config is None:
            raise ValueError(f"Unknown model {self.name}")
        return config

    @property
    def image_size(self) -> int:
        return self.model_config["vision_cfg"]["image_size"]

    @property
    def sequence_length(self) -> int:
        return self.model_config["text_cfg"].get("context_length", 77)


def to_onnx(
    model_cfg: OpenCLIPModelConfig,
    opset_version: int,
    output_dir_visual: Path | str | None = None,
    output_dir_textual: Path | str | None = None,
    no_cache: bool = False,
) -> tuple[Path | None, Path | None]:
    visual_path = None
    textual_path = None
    if output_dir_visual is not None:
        output_dir_visual = Path(output_dir_visual)
        visual_path = get_model_path(output_dir_visual)

    if output_dir_textual is not None:
        output_dir_textual = Path(output_dir_textual)
        textual_path = get_model_path(output_dir_textual)

    if not no_cache and (
        (textual_path is None or textual_path.exists()) and (visual_path is None or visual_path.exists())
    ):
        print(f"Models {textual_path} and {visual_path} already exist, skipping")
        return visual_path, textual_path

    import open_clip
    import torch
    from transformers import AutoTokenizer

    torch.backends.mha.set_fastpath_enabled(False)

    model = open_clip.create_model(
        model_cfg.name,
        pretrained=model_cfg.pretrained,
        jit=False,
        require_pretrained=True,
    )

    text_vision_cfg = open_clip.get_model_config(model_cfg.name)

    model.eval()
    for param in model.parameters():
        param.requires_grad_(False)

    if visual_path is not None:
        if no_cache or not visual_path.exists():
            save_config(
                open_clip.get_model_preprocess_cfg(model),
                output_dir_visual / "preprocess_cfg.json",
            )
            save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
            _export_image_encoder(model, model_cfg, visual_path, opset_version)
        else:
            print(f"Model {visual_path} already exists, skipping")

    if textual_path is not None:
        if no_cache or not textual_path.exists():
            tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
            AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
            _export_text_encoder(model, model_cfg, textual_path, opset_version)
        else:
            print(f"Model {textual_path} already exists, skipping")
    return visual_path, textual_path


def _export_image_encoder(
    model: "open_clip.CLIP", model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_image(image: torch.Tensor) -> torch.Tensor:
        output = model.encode_image(image, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)
    traced = torch.jit.trace(encode_image, args)  # type: ignore[no-untyped-call]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            traced,
            args,
            output_path.as_posix(),
            input_names=["image"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"image": {0: "batch_size"}},
        )


def _export_text_encoder(
    model: "open_clip.CLIP", model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_text(text: torch.Tensor) -> torch.Tensor:
        output = model.encode_text(text, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)
    traced = torch.jit.trace(encode_text, args)  # type: ignore[no-untyped-call]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            traced,
            args,
            output_path.as_posix(),
            input_names=["text"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"text": {0: "batch_size"}},
        )
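A hedged usage sketch for the new OpenCLIP exporter; the name/pretrained pair is what exporters.onnx derives from "ViT-B-32__openai", and the directories are placeholders.

# Hypothetical direct use (normally driven by exporters.onnx.export).
from pathlib import Path

from exporters.onnx.models.openclip import OpenCLIPModelConfig, to_onnx

cfg = OpenCLIPModelConfig("ViT-B-32", "openai")  # split of "ViT-B-32__openai" on "__"
visual, textual = to_onnx(
    cfg,
    opset_version=19,
    output_dir_visual=Path("models/ViT-B-32__openai/visual"),
    output_dir_textual=Path("models/ViT-B-32__openai/textual"),
    no_cache=False,
)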
New file (77 lines):
@@ -0,0 +1,77 @@
from pathlib import Path

RKNN_SOCS = ["rk3566", "rk3576", "rk3588"]


def _export_platform(
    model_dir: Path,
    target_platform: str,
    dynamic_input=None,
    fuse_matmul_softmax_matmul_to_sdpa: bool = True,
    no_cache: bool = False,
):
    from rknn.api import RKNN

    input_path = model_dir / "model.onnx"
    output_path = model_dir / "rknpu" / target_platform / "model.rknn"
    if not no_cache and output_path.exists():
        print(f"Model {input_path} already exists at {output_path}, skipping")
        return

    print(f"Exporting model {input_path} to {output_path}")

    rknn = RKNN(verbose=False)

    rknn.config(
        target_platform=target_platform,
        dynamic_input=dynamic_input,
        disable_rules=["fuse_matmul_softmax_matmul_to_sdpa"] if not fuse_matmul_softmax_matmul_to_sdpa else [],
        enable_flash_attention=True,
        model_pruning=True,
    )
    ret = rknn.load_onnx(model=input_path.as_posix())

    if ret != 0:
        raise RuntimeError("Load failed!")

    ret = rknn.build(do_quantization=False)

    if ret != 0:
        raise RuntimeError("Build failed!")

    output_path.parent.mkdir(parents=True, exist_ok=True)
    ret = rknn.export_rknn(output_path.as_posix())
    if ret != 0:
        raise RuntimeError("Export rknn model failed!")


def _export_platforms(model_dir: Path, dynamic_input=None, no_cache: bool = False):
    fuse_matmul_softmax_matmul_to_sdpa = True
    for soc in RKNN_SOCS:
        try:
            _export_platform(model_dir, soc, dynamic_input, fuse_matmul_softmax_matmul_to_sdpa)
        except Exception as e:
            print(f"Failed to export model for {soc}: {e}")
            if "inputs or 'outputs' must be set" in str(e):
                print("Retrying without fuse_matmul_softmax_matmul_to_sdpa")
                fuse_matmul_softmax_matmul_to_sdpa = False
                _export_platform(model_dir, soc, dynamic_input, fuse_matmul_softmax_matmul_to_sdpa)


def export(model_dir: Path, no_cache: bool = False):
    textual = model_dir / "textual"
    visual = model_dir / "visual"
    detection = model_dir / "detection"
    recognition = model_dir / "recognition"

    if textual.is_dir():
        _export_platforms(textual, no_cache=no_cache)

    if visual.is_dir():
        _export_platforms(visual, no_cache=no_cache)

    if detection.is_dir():
        _export_platforms(detection, dynamic_input=[[[1, 3, 640, 640]]], no_cache=no_cache)

    if recognition.is_dir():
        _export_platforms(recognition, dynamic_input=[[[1, 3, 112, 112]]], no_cache=no_cache)
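And a hedged sketch of the RKNN stage, which export.py runs after the ONNX export; the directory is a placeholder, and the resulting layout follows the output_path construction above.

# Hypothetical usage: convert the ONNX exports in a model directory for every RKNN_SOCS target.
from pathlib import Path

from exporters.rknn import export

# model_dir is expected to contain textual/ and visual/ (CLIP) or detection/ and recognition/
# (facial recognition) subdirectories, each holding a model.onnx.
export(Path("models/ViT-B-32__openai"), no_cache=False)
# Per part and SoC: models/ViT-B-32__openai/<part>/rknpu/<soc>/model.rknn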
machine-learning/export/immich_model_exporter/run.py (new file, 71 lines)
@@ -0,0 +1,71 @@
import subprocess

models = [
    # "ViT-B-16-SigLIP__webli",
    # "ViT-B-16-SigLIP-256__webli",
    # "ViT-B-16-SigLIP-384__webli",
    # "ViT-B-16-SigLIP-512__webli",
    # "ViT-B-16-SigLIP-i18n-256__webli",
    # "ViT-B-16-plus-240__laion400m_e31",
    # "ViT-B-16-plus-240__laion400m_e32",
    # "ViT-B-16__laion400m_e31",
    # "ViT-B-16__laion400m_e32",
    # "ViT-B-16__openai",
    # "ViT-B-32__laion2b-s34b-b79k",
    # "ViT-B-32__laion2b_e16",
    # "ViT-B-32__laion400m_e31",
    # "ViT-B-32__laion400m_e32",
    # "ViT-B-32__openai",
    # "ViT-L-14-336__openai",
    # "ViT-B-16-SigLIP2__webli",
    # "ViT-B-32-SigLIP2-256__webli",
    # "ViT-B-32-SigLIP2-384__webli",  # not available yet
    # "ViT-B-32-SigLIP2-512__webli",  # not available yet
    # "ViT-L-16-SigLIP2-256__webli",
    # "ViT-L-16-SigLIP2-384__webli",  # rknn seems to hang
    # "ViT-L-16-SigLIP2-512__webli",
    "ViT-SO400M-14-SigLIP2__webli",
    "ViT-SO400M-14-SigLIP2-378__webli",
    "ViT-SO400M-16-SigLIP2-256__webli",
    "ViT-SO400M-16-SigLIP2-384__webli",
    "ViT-SO400M-16-SigLIP2-512__webli",
    # "ViT-gopt-16-SigLIP2-256__webli",
    # "ViT-gopt-16-SigLIP2-384__webli",
    # "ViT-L-14-quickgelu__dfn2b",
    # "ViT-L-14__laion2b-s32b-b82k",
    # "ViT-L-14__laion400m_e31",
    # "ViT-L-14__laion400m_e32",
    # "ViT-L-14__openai",
    # "ViT-L-16-SigLIP-256__webli",
    # "ViT-L-16-SigLIP-384__webli",
    # "ViT-SO400M-14-SigLIP-384__webli",
    # "ViT-H-14__laion2b-s32b-b79k",
    # "ViT-H-14-quickgelu__dfn5b",
    # "ViT-H-14-378-quickgelu__dfn5b",
    # "RN101__openai",
    # "RN101__yfcc15m",
    # "RN50__cc12m",
    # "RN50__openai",
    # "RN50__yfcc15m",
    # "RN50x16__openai",
    # "RN50x4__openai",
    # "RN50x64__openai",
    # "nllb-clip-base-siglip__mrl",
    # "nllb-clip-base-siglip__v1",
    # "nllb-clip-large-siglip__mrl",
    # "nllb-clip-large-siglip__v1",
    # "xlm-roberta-base-ViT-B-32__laion5b_s13b_b90k",
    # "xlm-roberta-large-ViT-H-14__frozen_laion5b_s13b_b90k",
    # "M-CLIP/LABSE-Vit-L-14",
    # "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
    # "M-CLIP/XLM-Roberta-Large-Vit-B-32",
    # "M-CLIP/XLM-Roberta-Large-Vit-L-14",
]

if __name__ == "__main__":
    for model in models:
        try:
            print(f"Exporting model {model}")
            subprocess.check_call(["python", "export.py", model, "openclip"])
        except Exception as e:
            print(f"Failed to export model {model}: {e}")
Deleted file (20 lines):
@@ -1,20 +0,0 @@
FROM mambaorg/micromamba:bookworm-slim@sha256:e3797091302382ea841498bc93a7b0a50f7c1448333d5e946d2d1608d0c5f43d AS builder

ENV TRANSFORMERS_CACHE=/cache \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PATH="/opt/venv/bin:$PATH" \
    PYTHONPATH=/usr/src

COPY --chown=$MAMBA_USER:$MAMBA_USER conda-lock.yml /tmp/conda-lock.yml
RUN micromamba install -y -n base -f /tmp/conda-lock.yml && \
    micromamba remove -y -n base cxx-compiler && \
    micromamba clean --all --yes

WORKDIR /usr/src/app

COPY --chown=$MAMBA_USER:$MAMBA_USER start.sh .
COPY --chown=$MAMBA_USER:$MAMBA_USER app .

ENTRYPOINT ["/usr/local/bin/_entrypoint.sh"]
CMD ["./start.sh"]
File diff suppressed because it is too large.
Deleted file (15 lines):
@@ -1,15 +0,0 @@
name: base
channels:
  - conda-forge
platforms:
  - linux-64
  - linux-aarch64
dependencies:
  - black
  - conda-lock
  - mypy
  - pytest
  - pytest-cov
  - pytest-mock
  - ruff
category: dev
Deleted file (25 lines):
@@ -1,25 +0,0 @@
name: base
channels:
  - conda-forge
  - nvidia
  - pytorch
platforms:
  - linux-64
dependencies:
  - cxx-compiler
  - onnx==1.*
  - onnxruntime==1.*
  - open-clip-torch==2.*
  - orjson==3.*
  - pip
  - python==3.11.*
  - pytorch>=2.3
  - rich==13.*
  - safetensors==0.*
  - setuptools==68.*
  - torchvision
  - transformers==4.*
  - pip:
      - multilingual-clip
      - onnxsim
category: main
Deleted file (114 lines):
@@ -1,114 +0,0 @@
import os
import tempfile
import warnings
from dataclasses import dataclass, field
from pathlib import Path

import open_clip
import torch
from transformers import AutoTokenizer

from .util import get_model_path, save_config


@dataclass
class OpenCLIPModelConfig:
    name: str
    pretrained: str
    image_size: int = field(init=False)
    sequence_length: int = field(init=False)

    def __post_init__(self) -> None:
        open_clip_cfg = open_clip.get_model_config(self.name)
        if open_clip_cfg is None:
            raise ValueError(f"Unknown model {self.name}")
        self.image_size = open_clip_cfg["vision_cfg"]["image_size"]
        self.sequence_length = open_clip_cfg["text_cfg"].get("context_length", 77)


def to_onnx(
    model_cfg: OpenCLIPModelConfig,
    output_dir_visual: Path | str | None = None,
    output_dir_textual: Path | str | None = None,
) -> tuple[Path | None, Path | None]:
    visual_path = None
    textual_path = None
    with tempfile.TemporaryDirectory() as tmpdir:
        model = open_clip.create_model(
            model_cfg.name,
            pretrained=model_cfg.pretrained,
            jit=False,
            cache_dir=os.environ.get("CACHE_DIR", tmpdir),
            require_pretrained=True,
        )

        text_vision_cfg = open_clip.get_model_config(model_cfg.name)

        model.eval()
        for param in model.parameters():
            param.requires_grad_(False)

        if output_dir_visual is not None:
            output_dir_visual = Path(output_dir_visual)
            visual_path = get_model_path(output_dir_visual)

            save_config(open_clip.get_model_preprocess_cfg(model), output_dir_visual / "preprocess_cfg.json")
            save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
            export_image_encoder(model, model_cfg, visual_path)

        if output_dir_textual is not None:
            output_dir_textual = Path(output_dir_textual)
            textual_path = get_model_path(output_dir_textual)

            tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
            AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
            export_text_encoder(model, model_cfg, textual_path)
    return visual_path, textual_path


def export_image_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
    output_path = Path(output_path)

    def encode_image(image: torch.Tensor) -> torch.Tensor:
        output = model.encode_image(image, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)
    traced = torch.jit.trace(encode_image, args)  # type: ignore[no-untyped-call]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            traced,
            args,
            output_path.as_posix(),
            input_names=["image"],
            output_names=["embedding"],
            opset_version=17,
            # dynamic_axes={"image": {0: "batch_size"}},
        )


def export_text_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
    output_path = Path(output_path)

    def encode_text(text: torch.Tensor) -> torch.Tensor:
        output = model.encode_text(text, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)
    traced = torch.jit.trace(encode_text, args)  # type: ignore[no-untyped-call]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            traced,
            args,
            output_path.as_posix(),
            input_names=["text"],
            output_names=["embedding"],
            opset_version=17,
            # dynamic_axes={"text": {0: "batch_size"}},
        )
Deleted file (49 lines):
@@ -1,49 +0,0 @@
from pathlib import Path

import onnx
import onnxruntime as ort
import onnxsim


def save_onnx(model: onnx.ModelProto, output_path: Path | str) -> None:
    try:
        onnx.save(model, output_path)
    except ValueError as e:
        if "The proto size is larger than the 2 GB limit." in str(e):
            onnx.save(model, output_path, save_as_external_data=True, size_threshold=1_000_000)
        else:
            raise e


def optimize_onnxsim(model_path: Path | str, output_path: Path | str) -> None:
    model_path = Path(model_path)
    output_path = Path(output_path)
    model = onnx.load(model_path.as_posix())
    model, check = onnxsim.simplify(model)
    assert check, "Simplified ONNX model could not be validated"
    for file in model_path.parent.iterdir():
        if file.name.startswith("Constant") or "onnx" in file.name or file.suffix == ".weight":
            file.unlink()
    save_onnx(model, output_path)


def optimize_ort(
    model_path: Path | str,
    output_path: Path | str,
    level: ort.GraphOptimizationLevel = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC,
) -> None:
    model_path = Path(model_path)
    output_path = Path(output_path)

    sess_options = ort.SessionOptions()
    sess_options.graph_optimization_level = level
    sess_options.optimized_model_filepath = output_path.as_posix()

    ort.InferenceSession(model_path.as_posix(), providers=["CPUExecutionProvider"], sess_options=sess_options)


def optimize(model_path: Path | str) -> None:
    model_path = Path(model_path)

    optimize_ort(model_path, model_path)
    optimize_onnxsim(model_path, model_path)
Deleted file (113 lines):
@@ -1,113 +0,0 @@
import gc
import os
from pathlib import Path
from tempfile import TemporaryDirectory

import torch
from huggingface_hub import create_repo, upload_folder
from models import mclip, openclip
from models.optimize import optimize
from rich.progress import Progress

models = [
    "M-CLIP/LABSE-Vit-L-14",
    "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
    "M-CLIP/XLM-Roberta-Large-Vit-B-32",
    "M-CLIP/XLM-Roberta-Large-Vit-L-14",
    "RN101::openai",
    "RN101::yfcc15m",
    "RN50::cc12m",
    "RN50::openai",
    "RN50::yfcc15m",
    "RN50x16::openai",
    "RN50x4::openai",
    "RN50x64::openai",
    "ViT-B-16-SigLIP-256::webli",
    "ViT-B-16-SigLIP-384::webli",
    "ViT-B-16-SigLIP-512::webli",
    "ViT-B-16-SigLIP-i18n-256::webli",
    "ViT-B-16-SigLIP::webli",
    "ViT-B-16-plus-240::laion400m_e31",
    "ViT-B-16-plus-240::laion400m_e32",
    "ViT-B-16::laion400m_e31",
    "ViT-B-16::laion400m_e32",
    "ViT-B-16::openai",
    "ViT-B-32::laion2b-s34b-b79k",
    "ViT-B-32::laion2b_e16",
    "ViT-B-32::laion400m_e31",
    "ViT-B-32::laion400m_e32",
    "ViT-B-32::openai",
    "ViT-H-14-378-quickgelu::dfn5b",
    "ViT-H-14-quickgelu::dfn5b",
    "ViT-H-14::laion2b-s32b-b79k",
    "ViT-L-14-336::openai",
    "ViT-L-14-quickgelu::dfn2b",
    "ViT-L-14::laion2b-s32b-b82k",
    "ViT-L-14::laion400m_e31",
    "ViT-L-14::laion400m_e32",
    "ViT-L-14::openai",
    "ViT-L-16-SigLIP-256::webli",
    "ViT-L-16-SigLIP-384::webli",
    "ViT-SO400M-14-SigLIP-384::webli",
    "ViT-g-14::laion2b-s12b-b42k",
    "nllb-clip-base-siglip::mrl",
    "nllb-clip-base-siglip::v1",
    "nllb-clip-large-siglip::mrl",
    "nllb-clip-large-siglip::v1",
    "xlm-roberta-base-ViT-B-32::laion5b_s13b_b90k",
    "xlm-roberta-large-ViT-H-14::frozen_laion5b_s13b_b90k",
]

# glob to delete old UUID blobs when reuploading models
uuid_char = "[a-fA-F0-9]"
uuid_glob = uuid_char * 8 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 12

# remote repo files to be deleted before uploading
# deletion is in the same commit as the upload, so it's atomic
delete_patterns = ["**/*onnx*", "**/Constant*", "**/*.weight", "**/*.bias", f"**/{uuid_glob}"]

with Progress() as progress:
    task = progress.add_task("[green]Exporting models...", total=len(models))
    token = os.environ.get("HF_AUTH_TOKEN")
    torch.backends.mha.set_fastpath_enabled(False)
    with TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        for model in models:
            model_name = model.split("/")[-1].replace("::", "__")
            hf_model_name = model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
            hf_model_name = model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
            config_path = tmpdir / model_name / "config.json"

            def export() -> None:
                progress.update(task, description=f"[green]Exporting {hf_model_name}")
                visual_dir = tmpdir / hf_model_name / "visual"
                textual_dir = tmpdir / hf_model_name / "textual"
                if model.startswith("M-CLIP"):
                    visual_path, textual_path = mclip.to_onnx(model, visual_dir, textual_dir)
                else:
                    name, _, pretrained = model_name.partition("__")
                    config = openclip.OpenCLIPModelConfig(name, pretrained)
                    visual_path, textual_path = openclip.to_onnx(config, visual_dir, textual_dir)
                progress.update(task, description=f"[green]Optimizing {hf_model_name} (visual)")
                optimize(visual_path)
                progress.update(task, description=f"[green]Optimizing {hf_model_name} (textual)")
                optimize(textual_path)

                gc.collect()

            def upload() -> None:
                progress.update(task, description=f"[yellow]Uploading {hf_model_name}")
                repo_id = f"immich-app/{hf_model_name}"

                create_repo(repo_id, exist_ok=True)
                upload_folder(
                    repo_id=repo_id,
                    folder_path=tmpdir / hf_model_name,
                    delete_patterns=delete_patterns,
                    token=token,
                )

            export()
            if token is not None:
                upload()
            progress.update(task, advance=1)
machine-learning/export/pyproject.toml (new file, 47 lines)
@@ -0,0 +1,47 @@
[project]
name = "immich_model_exporter"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.10, <4.0"
dependencies = [
    "huggingface-hub>=0.29.3",
    "multilingual-clip>=1.0.10",
    "onnx>=1.14.1",
    "onnxruntime>=1.16.0",
    "open-clip-torch>=2.31.0",
    "typer>=0.15.2",
    "rknn-toolkit2>=2.3.0",
    "transformers>=4.49.0",
]

[tool.uv]
override-dependencies = [
    "onnx>=1.16.0,<2",
    "onnxruntime>=1.18.2,<2",
    "torch>=2.4",
    "torchvision>=0.21",
]

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[tool.uv.sources]
torch = [
    { index = "pytorch-cpu" },
]
torchvision = [
    { index = "pytorch-cpu" },
]

[tool.hatch.build.targets.sdist]
include = ["immich_model_exporter"]

[tool.hatch.build.targets.wheel]
include = ["immich_model_exporter"]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
Deleted file (11 lines):
@@ -1,11 +0,0 @@
---
tags:
 - immich
 - clip
---
# Model Description

This repo contains ONNX exports for the CLIP model [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32).
It separates the visual and textual encoders into separate models for the purpose of generating image and text embeddings.

This repo is specifically intended for use with [Immich](https://immich.app/), a self-hosted photo library.
Deleted file (69 lines):
@@ -1,69 +0,0 @@
import argparse
from pathlib import Path

from rknn.api import RKNN

parser = argparse.ArgumentParser("ONNX to RKNN model converter")
parser.add_argument(
    "model", help="Directory of the model that will be exported to RKNN ex:ViT-B-32__openai.", type=Path
)
parser.add_argument("target_platform", help="target platform ex:rk3566", type=str)
args = parser.parse_args()


def ConvertModel(model_dir: Path, target_platform: str, dynamic_input=None):
    input_path = model_dir / "model.onnx"
    print(f"Converting model {input_path}")
    rknn = RKNN(verbose=False)

    rknn.config(
        target_platform=target_platform,
        dynamic_input=dynamic_input,
        enable_flash_attention=True,
        # remove_reshape=True,
        # model_pruning=True
    )
    ret = rknn.load_onnx(model=input_path.as_posix())

    if ret != 0:
        print("Load failed!")
        exit(ret)

    ret = rknn.build(do_quantization=False)

    if ret != 0:
        print("Build failed!")
        exit(ret)

    output_path = model_dir / "rknpu" / target_platform / "model.rknn"
    output_path.parent.mkdir(parents=True, exist_ok=True)
    print(f"Exporting model {model_dir} to {output_path}")
    ret = rknn.export_rknn(output_path.as_posix())
    if ret != 0:
        print("Export rknn model failed!")
        exit(ret)


textual = args.model / "textual"
visual = args.model / "visual"
detection = args.model / "detection"
recognition = args.model / "recognition"

is_dir = [textual.is_dir(), visual.is_dir(), detection.is_dir(), recognition.is_dir()]
if not any(is_dir):
    print("Unknown model")
    exit(1)

is_textual, is_visual, is_detection, is_recognition = is_dir

if is_textual:
    ConvertModel(textual, target_platform=args.target_platform)

if is_visual:
    ConvertModel(visual, target_platform=args.target_platform)

if is_detection:
    ConvertModel(detection, args.target_platform, [[[1, 3, 640, 640]]])

if is_recognition:
    ConvertModel(recognition, args.target_platform, [[[1, 3, 112, 112]]])
Deleted file (13 lines):
@@ -1,13 +0,0 @@
#!/bin/bash

python3 build_rknn.py "$1" "$2" > immich_to_rknn2.log

# if "No lowering found for" found in log file, return error status 1
if grep -q "No lowering found for" immich_to_rknn2.log; then
    echo -e "\e[31mSome operations are not supported by RKNN, please check the log file for details.\e[0m"
    exit 1
else
    echo -e "\e[32mConversion completed successfully.\e[0m"
    rm immich_to_rknn2.log
    exit 0
fi
Deleted file (1 line):
@@ -1 +0,0 @@
rknn-toolkit2==2.3.0
machine-learning/export/uv.lock (generated, new file, 1207 lines)
File diff suppressed because it is too large.