Mirror of https://github.com/immich-app/immich.git, synced 2025-05-24 01:12:58 -04:00

chore(ml): remove exporter (#17182)

* remove exporter code
* update gha

This commit is contained in:
parent 16e0166d22
commit f7d730eb05

.github/workflows/test.yml (vendored): 4 changes
@@ -395,10 +395,10 @@ jobs:
           uv sync --extra cpu
       - name: Lint with ruff
         run: |
-          uv run ruff check --output-format=github app export
+          uv run ruff check --output-format=github app
       - name: Check black formatting
         run: |
-          uv run black --check app export
+          uv run black --check app
       - name: Run mypy type checking
         run: |
           uv run mypy --strict app/
@@ -1 +0,0 @@
3.12
@@ -1,165 +0,0 @@
import json
import resource
from pathlib import Path

import typer
from tenacity import retry, stop_after_attempt, wait_fixed
from typing_extensions import Annotated

from .exporters.constants import DELETE_PATTERNS, SOURCE_TO_METADATA, ModelSource, ModelTask
from .exporters.onnx import export as onnx_export
from .exporters.rknn import export as rknn_export

app = typer.Typer(pretty_exceptions_show_locals=False)


def generate_readme(model_name: str, model_source: ModelSource) -> str:
    (name, link, type) = SOURCE_TO_METADATA[model_source]
    match model_source:
        case ModelSource.MCLIP:
            tags = ["immich", "clip", "multilingual"]
        case ModelSource.OPENCLIP:
            tags = ["immich", "clip"]
            lowered = model_name.lower()
            if "xlm" in lowered or "nllb" in lowered:
                tags.append("multilingual")
        case ModelSource.INSIGHTFACE:
            tags = ["immich", "facial-recognition"]
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    return f"""---
tags:
{" - " + "\n - ".join(tags)}
---
# Model Description

This repo contains ONNX exports for the associated {type} model by {name}. See the [{name}]({link}) repo for more info.

This repo is specifically intended for use with [Immich](https://immich.app/), a self-hosted photo library.
"""


def clean_name(model_name: str) -> str:
    hf_model_name = model_name.split("/")[-1]
    hf_model_name = hf_model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
    hf_model_name = hf_model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
    return hf_model_name


@app.command()
def export(model_name: str, model_source: ModelSource, output_dir: Path = Path("models"), cache: bool = True) -> None:
    hf_model_name = clean_name(model_name)
    output_dir = output_dir / hf_model_name
    match model_source:
        case ModelSource.MCLIP | ModelSource.OPENCLIP:
            output_dir.mkdir(parents=True, exist_ok=True)
            onnx_export(model_name, model_source, output_dir, cache=cache)
        case ModelSource.INSIGHTFACE:
            from huggingface_hub import snapshot_download

            # TODO: start from insightface dump instead of downloading from HF
            snapshot_download(f"immich-app/{hf_model_name}", local_dir=output_dir)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    try:
        rknn_export(output_dir, cache=cache)
    except Exception as e:
        print(f"Failed to export model {model_name} to rknn: {e}")
        (output_dir / "rknpu").unlink(missing_ok=True)

    readme_path = output_dir / "README.md"
    if not (cache or readme_path.exists()):
        with open(readme_path, "w") as f:
            f.write(generate_readme(model_name, model_source))


@app.command()
def profile(model_dir: Path, model_task: ModelTask, output_path: Path) -> None:
    from timeit import timeit

    import numpy as np
    import onnxruntime as ort

    np.random.seed(0)

    sess_options = ort.SessionOptions()
    sess_options.enable_cpu_mem_arena = False
    providers = ["CPUExecutionProvider"]
    provider_options = [{"arena_extend_strategy": "kSameAsRequested"}]
    match model_task:
        case ModelTask.SEARCH:
            textual = ort.InferenceSession(
                model_dir / "textual" / "model.onnx",
                sess_options=sess_options,
                providers=providers,
                provider_options=provider_options,
            )
            tokens = {node.name: np.random.rand(*node.shape).astype(np.int32) for node in textual.get_inputs()}

            visual = ort.InferenceSession(
                model_dir / "visual" / "model.onnx",
                sess_options=sess_options,
                providers=providers,
                provider_options=provider_options,
            )
            image = {node.name: np.random.rand(*node.shape).astype(np.float32) for node in visual.get_inputs()}

            def predict() -> None:
                textual.run(None, tokens)
                visual.run(None, image)

        case ModelTask.FACIAL_RECOGNITION:
            detection = ort.InferenceSession(
                model_dir / "detection" / "model.onnx",
                sess_options=sess_options,
                providers=providers,
                provider_options=provider_options,
            )
            image = {node.name: np.random.rand(1, 3, 640, 640).astype(np.float32) for node in detection.get_inputs()}

            recognition = ort.InferenceSession(
                model_dir / "recognition" / "model.onnx",
                sess_options=sess_options,
                providers=providers,
                provider_options=provider_options,
            )
            face = {node.name: np.random.rand(1, 3, 112, 112).astype(np.float32) for node in recognition.get_inputs()}

            def predict() -> None:
                detection.run(None, image)
                recognition.run(None, face)

        case _:
            raise ValueError(f"Unsupported model task {model_task}")

    predict()
    ms = timeit(predict, number=100)
    rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    json.dump({"pretrained_model": model_dir.name, "peak_rss": rss, "exec_time_ms": ms}, output_path.open("w"))
    print(f"Model {model_dir.name} took {ms:.2f}ms per iteration using {rss / 1024:.2f}MiB of memory")


@app.command()
def upload(
    model_dir: Path,
    hf_organization: str = "immich-app",
    hf_auth_token: Annotated[str | None, typer.Option(envvar="HF_AUTH_TOKEN")] = None,
) -> None:
    from huggingface_hub import create_repo, upload_folder

    repo_id = f"{hf_organization}/{model_dir.name}"

    @retry(stop=stop_after_attempt(5), wait=wait_fixed(5))
    def upload_model() -> None:
        create_repo(repo_id, exist_ok=True, token=hf_auth_token)
        upload_folder(
            repo_id=repo_id,
            folder_path=model_dir,
            # remote repo files to be deleted before uploading
            # deletion is in the same commit as the upload, so it's atomic
            delete_patterns=DELETE_PATTERNS,
            token=hf_auth_token,
        )

    upload_model()
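For orientation, the removed module exposed three Typer commands (export, profile, upload) that run.py later in this diff drives via "python -m immich_model_exporter". A rough sketch of the equivalent manual invocations, mirroring the arguments run.py passes (the model name is only an example taken from its openclip list):

python -m immich_model_exporter export ViT-B-32__openai openclip
python -m immich_model_exporter profile models/ViT-B-32__openai clip --output_path profiling/ViT-B-32__openai.json
python -m immich_model_exporter upload models/ViT-B-32__openai

Per the envvar option above, HF_AUTH_TOKEN must be set in the environment for the upload step to authenticate.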
@@ -1,3 +0,0 @@
from immich_model_exporter import app

app()
@@ -1,54 +0,0 @@
from enum import StrEnum
from typing import NamedTuple


class ModelSource(StrEnum):
    INSIGHTFACE = "insightface"
    MCLIP = "mclip"
    OPENCLIP = "openclip"


class ModelTask(StrEnum):
    FACIAL_RECOGNITION = "facial-recognition"
    SEARCH = "clip"


class SourceMetadata(NamedTuple):
    name: str
    link: str
    type: str


SOURCE_TO_METADATA = {
    ModelSource.MCLIP: SourceMetadata("M-CLIP", "https://huggingface.co/M-CLIP", "CLIP"),
    ModelSource.OPENCLIP: SourceMetadata("OpenCLIP", "https://github.com/mlfoundations/open_clip", "CLIP"),
    ModelSource.INSIGHTFACE: SourceMetadata(
        "InsightFace", "https://github.com/deepinsight/insightface/tree/master", "facial recognition"
    ),
}

SOURCE_TO_TASK = {
    ModelSource.MCLIP: ModelTask.SEARCH,
    ModelSource.OPENCLIP: ModelTask.SEARCH,
    ModelSource.INSIGHTFACE: ModelTask.FACIAL_RECOGNITION,
}

RKNN_SOCS = ["rk3566", "rk3568", "rk3576", "rk3588"]


# glob to delete old UUID blobs when reuploading models
_uuid_char = "[a-fA-F0-9]"
_uuid_glob = _uuid_char * 8 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 12
DELETE_PATTERNS = [
    "**/*onnx*",
    "**/Constant*",
    "**/*.weight",
    "**/*.bias",
    "**/*.proj",
    "**/*in_proj_bias",
    "**/*.npy",
    "**/*.latent",
    "**/*.pos_embed",
    f"**/{_uuid_glob}",
]
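The _uuid_glob above is built programmatically rather than written out by hand. A quick sanity check of what it expands to, as a sketch using fnmatch (which only approximates the Hugging Face Hub glob semantics that delete_patterns actually uses):

from fnmatch import fnmatch

_uuid_char = "[a-fA-F0-9]"
_uuid_glob = _uuid_char * 8 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 12

# a stale blob named after a UUID matches, a regular model file does not
assert fnmatch("1b4f0e98-61b7-4f7a-8f4b-2c3d4e5f6a7b", _uuid_glob)
assert not fnmatch("model.onnx", _uuid_glob)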
@@ -1,20 +0,0 @@
from pathlib import Path

from ..constants import ModelSource
from .models import mclip, openclip


def export(
    model_name: str, model_source: ModelSource, output_dir: Path, opset_version: int = 19, cache: bool = True
) -> None:
    visual_dir = output_dir / "visual"
    textual_dir = output_dir / "textual"
    match model_source:
        case ModelSource.MCLIP:
            mclip.to_onnx(model_name, opset_version, visual_dir, textual_dir, cache=cache)
        case ModelSource.OPENCLIP:
            name, _, pretrained = model_name.partition("__")
            config = openclip.OpenCLIPModelConfig(name, pretrained)
            openclip.to_onnx(config, opset_version, visual_dir, textual_dir, cache=cache)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")
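OpenCLIP model names are packed here as "<architecture>__<pretrained-tag>", so partition("__") recovers both halves. A quick illustration with a name from run.py's openclip list:

>>> "ViT-B-32__openai".partition("__")
('ViT-B-32', '__', 'openai')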
@@ -1,77 +0,0 @@
import warnings
from pathlib import Path
from typing import Any

from .openclip import OpenCLIPModelConfig
from .openclip import to_onnx as openclip_to_onnx
from .util import get_model_path

_MCLIP_TO_OPENCLIP = {
    "M-CLIP/XLM-Roberta-Large-Vit-B-32": OpenCLIPModelConfig("ViT-B-32", "openai"),
    "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus": OpenCLIPModelConfig("ViT-B-16-plus-240", "laion400m_e32"),
    "M-CLIP/LABSE-Vit-L-14": OpenCLIPModelConfig("ViT-L-14", "openai"),
    "M-CLIP/XLM-Roberta-Large-Vit-L-14": OpenCLIPModelConfig("ViT-L-14", "openai"),
}


def to_onnx(
    model_name: str,
    opset_version: int,
    output_dir_visual: Path | str,
    output_dir_textual: Path | str,
    cache: bool = True,
) -> tuple[Path, Path]:
    textual_path = get_model_path(output_dir_textual)
    if not cache or not textual_path.exists():
        import torch
        from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
        from transformers import AutoTokenizer

        torch.backends.mha.set_fastpath_enabled(False)

        model = MultilingualCLIP.from_pretrained(model_name)
        AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)

        model.eval()
        for param in model.parameters():
            param.requires_grad_(False)

        _export_text_encoder(model, textual_path, opset_version)
    else:
        print(f"Model {textual_path} already exists, skipping")
    visual_path, _ = openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], opset_version, output_dir_visual, cache=cache)
    assert visual_path is not None, "Visual model export failed"
    return visual_path, textual_path


def _export_text_encoder(model: Any, output_path: Path | str, opset_version: int) -> None:
    import torch
    from multilingual_clip.pt_multilingual_clip import MultilingualCLIP

    output_path = Path(output_path)

    def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        embs = self.transformer(input_ids, attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        embs = self.LinearTransformation(embs)
        return torch.nn.functional.normalize(embs, dim=-1)

    # unfortunately need to monkeypatch for tracing to work here
    # otherwise it hits the 2GiB protobuf serialization limit
    MultilingualCLIP.forward = forward

    args = (torch.ones(1, 77, dtype=torch.int32), torch.ones(1, 77, dtype=torch.int32))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["input_ids", "attention_mask"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={
            #     "input_ids": {0: "batch_size", 1: "sequence_length"},
            #     "attention_mask": {0: "batch_size", 1: "sequence_length"},
            # },
        )
@@ -1,151 +0,0 @@
import warnings
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Any

from .util import get_model_path, save_config


@dataclass
class OpenCLIPModelConfig:
    name: str
    pretrained: str

    @cached_property
    def model_config(self) -> dict[str, Any]:
        import open_clip

        config: dict[str, Any] | None = open_clip.get_model_config(self.name)
        if config is None:
            raise ValueError(f"Unknown model {self.name}")
        return config

    @property
    def image_size(self) -> int:
        image_size: int = self.model_config["vision_cfg"]["image_size"]
        return image_size

    @property
    def sequence_length(self) -> int:
        context_length: int = self.model_config["text_cfg"].get("context_length", 77)
        return context_length


def to_onnx(
    model_cfg: OpenCLIPModelConfig,
    opset_version: int,
    output_dir_visual: Path | str | None = None,
    output_dir_textual: Path | str | None = None,
    cache: bool = True,
) -> tuple[Path | None, Path | None]:
    visual_path = None
    textual_path = None
    if output_dir_visual is not None:
        output_dir_visual = Path(output_dir_visual)
        visual_path = get_model_path(output_dir_visual)

    if output_dir_textual is not None:
        output_dir_textual = Path(output_dir_textual)
        textual_path = get_model_path(output_dir_textual)

    if cache and ((textual_path is None or textual_path.exists()) and (visual_path is None or visual_path.exists())):
        print(f"Models {textual_path} and {visual_path} already exist, skipping")
        return visual_path, textual_path

    import open_clip
    import torch
    from transformers import AutoTokenizer

    torch.backends.mha.set_fastpath_enabled(False)

    model = open_clip.create_model(
        model_cfg.name,
        pretrained=model_cfg.pretrained,
        jit=False,
        require_pretrained=True,
    )

    text_vision_cfg = open_clip.get_model_config(model_cfg.name)

    model.eval()
    for param in model.parameters():
        param.requires_grad_(False)

    if visual_path is not None and output_dir_visual is not None:
        if not cache or not visual_path.exists():
            save_config(
                open_clip.get_model_preprocess_cfg(model),
                output_dir_visual / "preprocess_cfg.json",
            )
            save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
            _export_image_encoder(model, model_cfg, visual_path, opset_version)
        else:
            print(f"Model {visual_path} already exists, skipping")

    if textual_path is not None and output_dir_textual is not None:
        if not cache or not textual_path.exists():
            tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
            AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
            _export_text_encoder(model, model_cfg, textual_path, opset_version)
        else:
            print(f"Model {textual_path} already exists, skipping")
    return visual_path, textual_path


def _export_image_encoder(
    model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_image(image: torch.Tensor) -> torch.Tensor:
        output = model.encode_image(image, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    model.forward = encode_image

    args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["image"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"image": {0: "batch_size"}},
        )


def _export_text_encoder(
    model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_text(text: torch.Tensor) -> torch.Tensor:
        output = model.encode_text(text, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    model.forward = encode_text

    args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["text"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"text": {0: "batch_size"}},
        )
@@ -1,15 +0,0 @@
import json
from pathlib import Path
from typing import Any


def get_model_path(output_dir: Path | str) -> Path:
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    return output_dir / "model.onnx"


def save_config(config: Any, output_path: Path | str) -> None:
    output_path = Path(output_path)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    json.dump(config, output_path.open("w"))
@@ -1,96 +0,0 @@
from pathlib import Path

from .constants import RKNN_SOCS


def _export_platform(
    model_dir: Path,
    target_platform: str,
    inputs: list[str] | None = None,
    input_size_list: list[list[int]] | None = None,
    fuse_matmul_softmax_matmul_to_sdpa: bool = True,
    cache: bool = True,
) -> None:
    from rknn.api import RKNN

    input_path = model_dir / "model.onnx"
    output_path = model_dir / "rknpu" / target_platform / "model.rknn"
    if cache and output_path.exists():
        print(f"Model {input_path} already exists at {output_path}, skipping")
        return

    print(f"Exporting model {input_path} to {output_path}")

    rknn = RKNN(verbose=False)

    rknn.config(
        target_platform=target_platform,
        disable_rules=["fuse_matmul_softmax_matmul_to_sdpa"] if not fuse_matmul_softmax_matmul_to_sdpa else [],
        enable_flash_attention=False,
        model_pruning=True,
    )
    ret = rknn.load_onnx(model=input_path.as_posix(), inputs=inputs, input_size_list=input_size_list)

    if ret != 0:
        raise RuntimeError("Load failed!")

    ret = rknn.build(do_quantization=False)

    if ret != 0:
        raise RuntimeError("Build failed!")

    output_path.parent.mkdir(parents=True, exist_ok=True)
    ret = rknn.export_rknn(output_path.as_posix())
    if ret != 0:
        raise RuntimeError("Export rknn model failed!")


def _export_platforms(
    model_dir: Path,
    inputs: list[str] | None = None,
    input_size_list: list[list[int]] | None = None,
    cache: bool = True,
) -> None:
    fuse_matmul_softmax_matmul_to_sdpa = True
    for soc in RKNN_SOCS:
        try:
            _export_platform(
                model_dir,
                soc,
                inputs=inputs,
                input_size_list=input_size_list,
                fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
                cache=cache,
            )
        except Exception as e:
            print(f"Failed to export model for {soc}: {e}")
            if "inputs or 'outputs' must be set" in str(e):
                print("Retrying without fuse_matmul_softmax_matmul_to_sdpa")
                fuse_matmul_softmax_matmul_to_sdpa = False
                _export_platform(
                    model_dir,
                    soc,
                    inputs=inputs,
                    input_size_list=input_size_list,
                    fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
                    cache=cache,
                )


def export(model_dir: Path, cache: bool = True) -> None:
    textual = model_dir / "textual"
    visual = model_dir / "visual"
    detection = model_dir / "detection"
    recognition = model_dir / "recognition"

    if textual.is_dir():
        _export_platforms(textual, cache=cache)

    if visual.is_dir():
        _export_platforms(visual, cache=cache)

    if detection.is_dir():
        _export_platforms(detection, inputs=["input.1"], input_size_list=[[1, 3, 640, 640]], cache=cache)

    if recognition.is_dir():
        _export_platforms(recognition, inputs=["input.1"], input_size_list=[[1, 3, 112, 112]], cache=cache)
@@ -1,22 +0,0 @@
import json
from pathlib import Path

models_dir = Path("models")
model_to_embed_dim = {}
for model_dir in models_dir.iterdir():
    if not model_dir.is_dir():
        continue

    config_path = model_dir / "config.json"
    if not config_path.exists():
        print(f"Skipping {model_dir.name} as it does not have a config.json")
        continue
    with open(config_path) as f:
        config = json.load(f)
    embed_dim = config.get("embed_dim")
    if embed_dim is None:
        print(f"Skipping {model_dir.name} as it does not have an embed_dim")
        continue
    print(f"{model_dir.name}: {embed_dim}")
    model_to_embed_dim[model_dir.name] = {"dimSize": embed_dim}
print(json.dumps(model_to_embed_dim))
@@ -1,121 +0,0 @@
import polars as pl


def collapsed_table(language: str, df: pl.DataFrame) -> str:
    with pl.Config(
        tbl_formatting="ASCII_MARKDOWN",
        tbl_hide_column_data_types=True,
        tbl_hide_dataframe_shape=True,
        fmt_str_lengths=100,
        tbl_rows=1000,
        tbl_width_chars=1000,
    ):
        return f"<details>\n<summary>{language}</summary>\n{str(df)}\n</details>"


languages = {
    "en": "English",
    "ar": "Arabic",
    "bn": "Bengali",
    "zh": "Chinese (Simplified)",
    "hr": "Croatian",
    "quz": "Cusco Quechua",
    "cs": "Czech",
    "da": "Danish",
    "nl": "Dutch",
    "fil": "Filipino",
    "fi": "Finnish",
    "fr": "French",
    "de": "German",
    "el": "Greek",
    "he": "Hebrew",
    "hi": "Hindi",
    "hu": "Hungarian",
    "id": "Indonesian",
    "it": "Italian",
    "ja": "Japanese",
    "ko": "Korean",
    "mi": "Maori",
    "no": "Norwegian",
    "fa": "Persian",
    "pl": "Polish",
    "pt": "Portuguese",
    "ro": "Romanian",
    "ru": "Russian",
    "es": "Spanish",
    "sw": "Swahili",
    "sv": "Swedish",
    "te": "Telugu",
    "th": "Thai",
    "tr": "Turkish",
    "uk": "Ukrainian",
    "vi": "Vietnamese",
}

profile_df = pl.scan_ndjson("profiling/*.json").select("pretrained_model", "peak_rss", "exec_time_ms")
eval_df = pl.scan_ndjson("results/*.json").select("model", "pretrained", "language", "metrics")

eval_df = eval_df.with_columns(
    model=pl.col("model")
    .str.replace("xlm-roberta-base", "XLM-Roberta-Base")
    .str.replace("xlm-roberta-large", "XLM-Roberta-Large")
)
eval_df = eval_df.with_columns(pretrained_model=pl.concat_str(pl.col("model"), pl.col("pretrained"), separator="__"))
eval_df = eval_df.drop("model", "pretrained")
eval_df = eval_df.join(profile_df, on="pretrained_model")

eval_df = eval_df.with_columns(
    recall=(
        pl.col("metrics").struct.field("image_retrieval_recall@1")
        + pl.col("metrics").struct.field("image_retrieval_recall@5")
        + pl.col("metrics").struct.field("image_retrieval_recall@10")
    )
    * (100 / 3)
)

pareto_front = eval_df.join_where(
    eval_df.select("language", "peak_rss", "exec_time_ms", "recall").rename(
        {
            "language": "language_other",
            "peak_rss": "peak_rss_other",
            "exec_time_ms": "exec_time_ms_other",
            "recall": "recall_other",
        }
    ),
    (pl.col("language") == pl.col("language_other"))
    & (pl.col("peak_rss_other") <= pl.col("peak_rss"))
    & (pl.col("exec_time_ms_other") <= pl.col("exec_time_ms"))
    & (pl.col("recall_other") >= pl.col("recall"))
    & (
        (pl.col("peak_rss_other") < pl.col("peak_rss"))
        | (pl.col("exec_time_ms_other") < pl.col("exec_time_ms"))
        | (pl.col("recall_other") > pl.col("recall"))
    ),
)
eval_df = eval_df.join(pareto_front, on=["pretrained_model", "language"], how="left")
eval_df = eval_df.with_columns(is_pareto=pl.col("recall_other").is_null())
eval_df = (
    eval_df.drop("peak_rss_other", "exec_time_ms_other", "recall_other", "language_other")
    .unique(subset=["pretrained_model", "language"])
    .collect()
)
eval_df.write_parquet("model_info.parquet")

eval_df = eval_df.drop("metrics")
eval_df = eval_df.filter(pl.col("recall") >= 20)
eval_df = eval_df.sort("recall", descending=True)
eval_df = eval_df.select(
    pl.col("pretrained_model").alias("Model"),
    (pl.col("peak_rss") / 1024).round().cast(pl.UInt32).alias("Memory (MiB)"),
    pl.col("exec_time_ms").round(2).alias("Execution Time (ms)"),
    pl.col("language").alias("Language"),
    pl.col("recall").round(2).alias("Recall (%)"),
    pl.when(pl.col("is_pareto")).then(pl.lit("✅")).otherwise(pl.lit("❌")).alias("Pareto Optimal"),
)


for language in languages:
    lang_df = eval_df.filter(pl.col("Language") == language).drop("Language")
    if lang_df.shape[0] == 0:
        continue
    print(collapsed_table(languages[language], lang_df))
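The join_where block above computes a per-language Pareto front: a row is kept as Pareto-optimal when no other model in the same language is at least as good on memory, execution time, and recall while being strictly better on at least one of them; rows left unmatched by the left join (recall_other is null) are the optimal ones. A minimal standalone sketch of the same idea with made-up numbers; it finishes with an anti-join instead of the left-join-plus-is_null flag, but selects the same set:

import polars as pl

df = pl.DataFrame(
    {
        "pretrained_model": ["a", "b", "c"],
        "language": ["en", "en", "en"],
        "peak_rss": [100, 200, 150],
        "exec_time_ms": [10.0, 5.0, 12.0],
        "recall": [80.0, 85.0, 70.0],
    }
)
other = df.select("language", "peak_rss", "exec_time_ms", "recall").rename(
    {
        "language": "language_other",
        "peak_rss": "peak_rss_other",
        "exec_time_ms": "exec_time_ms_other",
        "recall": "recall_other",
    }
)
# every row matched here is dominated by some other row for the same language
dominated = df.join_where(
    other,
    (pl.col("language") == pl.col("language_other"))
    & (pl.col("peak_rss_other") <= pl.col("peak_rss"))
    & (pl.col("exec_time_ms_other") <= pl.col("exec_time_ms"))
    & (pl.col("recall_other") >= pl.col("recall"))
    & (
        (pl.col("peak_rss_other") < pl.col("peak_rss"))
        | (pl.col("exec_time_ms_other") < pl.col("exec_time_ms"))
        | (pl.col("recall_other") > pl.col("recall"))
    ),
)
# "c" is dominated by "a"; "a" and "b" remain on the Pareto front
pareto = df.join(
    dominated.select("pretrained_model", "language").unique(),
    on=["pretrained_model", "language"],
    how="anti",
)
print(pareto)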
@@ -1,171 +0,0 @@
import subprocess
from pathlib import Path

from exporters.constants import ModelSource

from immich_model_exporter import clean_name
from immich_model_exporter.exporters.constants import SOURCE_TO_TASK

mclip = [
    "M-CLIP/LABSE-Vit-L-14",
    "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
    "M-CLIP/XLM-Roberta-Large-Vit-B-32",
    "M-CLIP/XLM-Roberta-Large-Vit-L-14",
]

openclip = [
    "RN101__openai",
    "RN101__yfcc15m",
    "RN50__cc12m",
    "RN50__openai",
    "RN50__yfcc15m",
    "RN50x16__openai",
    "RN50x4__openai",
    "RN50x64__openai",
    "ViT-B-16-SigLIP-256__webli",
    "ViT-B-16-SigLIP-384__webli",
    "ViT-B-16-SigLIP-512__webli",
    "ViT-B-16-SigLIP-i18n-256__webli",
    "ViT-B-16-SigLIP2__webli",
    "ViT-B-16-SigLIP__webli",
    "ViT-B-16-plus-240__laion400m_e31",
    "ViT-B-16-plus-240__laion400m_e32",
    "ViT-B-16__laion400m_e31",
    "ViT-B-16__laion400m_e32",
    "ViT-B-16__openai",
    "ViT-B-32-SigLIP2-256__webli",
    "ViT-B-32__laion2b-s34b-b79k",
    "ViT-B-32__laion2b_e16",
    "ViT-B-32__laion400m_e31",
    "ViT-B-32__laion400m_e32",
    "ViT-B-32__openai",
    "ViT-H-14-378-quickgelu__dfn5b",
    "ViT-H-14-quickgelu__dfn5b",
    "ViT-H-14__laion2b-s32b-b79k",
    "ViT-L-14-336__openai",
    "ViT-L-14-quickgelu__dfn2b",
    "ViT-L-14__laion2b-s32b-b82k",
    "ViT-L-14__laion400m_e31",
    "ViT-L-14__laion400m_e32",
    "ViT-L-14__openai",
    "ViT-L-16-SigLIP-256__webli",
    "ViT-L-16-SigLIP-384__webli",
    "ViT-L-16-SigLIP2-256__webli",
    "ViT-L-16-SigLIP2-384__webli",
    "ViT-L-16-SigLIP2-512__webli",
    "ViT-SO400M-14-SigLIP-384__webli",
    "ViT-SO400M-14-SigLIP2-378__webli",
    "ViT-SO400M-14-SigLIP2__webli",
    "ViT-SO400M-16-SigLIP2-256__webli",
    "ViT-SO400M-16-SigLIP2-384__webli",
    "ViT-SO400M-16-SigLIP2-512__webli",
    "ViT-gopt-16-SigLIP2-256__webli",
    "ViT-gopt-16-SigLIP2-384__webli",
    "nllb-clip-base-siglip__mrl",
    "nllb-clip-base-siglip__v1",
    "nllb-clip-large-siglip__mrl",
    "nllb-clip-large-siglip__v1",
    "xlm-roberta-base-ViT-B-32__laion5b_s13b_b90k",
    "xlm-roberta-large-ViT-H-14__frozen_laion5b_s13b_b90k",
]

insightface = [
    "antelopev2",
    "buffalo_l",
    "buffalo_m",
    "buffalo_s",
]


def export_models(models: list[str], source: ModelSource) -> None:
    profiling_dir = Path("profiling")
    profiling_dir.mkdir(exist_ok=True)
    for model in models:
        try:
            model_dir = f"models/{clean_name(model)}"
            task = SOURCE_TO_TASK[source]

            print(f"Processing model {model}")
            subprocess.check_call(["python", "-m", "immich_model_exporter", "export", model, source])
            subprocess.check_call(
                [
                    "python",
                    "-m",
                    "immich_model_exporter",
                    "profile",
                    model_dir,
                    task,
                    "--output_path",
                    profiling_dir / f"{model}.json",
                ]
            )
            subprocess.check_call(["python", "-m", "immich_model_exporter", "upload", model_dir])
        except Exception as e:
            print(f"Failed to export model {model}: {e}")


if __name__ == "__main__":
    export_models(mclip, ModelSource.MCLIP)
    export_models(openclip, ModelSource.OPENCLIP)
    export_models(insightface, ModelSource.INSIGHTFACE)

    Path("results").mkdir(exist_ok=True)
    subprocess.check_call(
        [
            "python",
            "clip_benchmark",
            "eval",
            "--pretrained_model",
            *[name.replace("__", ",") for name in openclip],
            "--task",
            "zeroshot_retrieval",
            "--dataset",
            "crossmodal3600",
            "--batch_size",
            "64",
            "--language",
            "ar",
            "bn",
            "cs",
            "da",
            "de",
            "el",
            "en",
            "es",
            "fa",
            "fi",
            "fil",
            "fr",
            "he",
            "hi",
            "hr",
            "hu",
            "id",
            "it",
            "ja",
            "ko",
            "mi",
            "nl",
            "no",
            "pl",
            "pt",
            "quz",
            "ro",
            "ru",
            "sv",
            "sw",
            "te",
            "th",
            "tr",
            "uk",
            "vi",
            "zh",
            "--recall_k",
            "1",
            "5",
            "10",
            "--no_amp",
            "--output",
            "results/{dataset}_{language}_{model}_{pretrained}.json",
        ]
    )
@@ -1,60 +0,0 @@
[project]
name = "immich_model_exporter"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.10, <4.0"
dependencies = [
    "huggingface-hub>=0.29.3",
    "multilingual-clip>=1.0.10",
    "onnx>=1.14.1",
    "onnxruntime>=1.16.0",
    "open-clip-torch>=2.31.0",
    "typer>=0.15.2",
    "rknn-toolkit2>=2.3.0",
    "transformers>=4.49.0",
    "tenacity>=9.0.0",
    "clip-benchmark>=1.6.1",
    "polars>=1.25.2",
]

[dependency-groups]
dev = ["black>=23.3.0", "mypy>=1.3.0", "ruff>=0.0.272"]

[tool.uv]
override-dependencies = [
    "onnx>=1.16.0,<2",
    "onnxruntime>=1.18.2,<2",
    "torch>=2.4",
    "torchvision>=0.21",
]

[tool.hatch.build.targets.sdist]
include = ["immich_model_exporter"]

[tool.hatch.build.targets.wheel]
include = ["immich_model_exporter"]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.mypy]
python_version = "3.12"
follow_imports = "silent"
warn_redundant_casts = true
disallow_any_generics = true
check_untyped_defs = true
disallow_untyped_defs = true
ignore_missing_imports = true

[tool.ruff]
line-length = 120
target-version = "py312"

[tool.ruff.lint]
select = ["E", "F", "I"]

[tool.black]
line-length = 120
target-version = ['py312']
machine-learning/export/uv.lock (generated): 1924 changes
File diff suppressed because it is too large.