Mirror of https://github.com/immich-app/immich.git, synced 2025-07-08 10:44:15 -04:00
bump versions, run on mich

use 3.12, use 1.19.2

Commit d9a41b8ea0, parent f30fac971a
@@ -11,7 +11,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele

 - ARM NN (Mali)
 - CUDA (NVIDIA GPUs with [compute capability](https://developer.nvidia.com/cuda-gpus) 5.2 or higher)
-- ROCM (AMD GPUs)
+- ROCm (AMD GPUs)
 - OpenVINO (Intel GPUs such as Iris Xe and Arc)

 ## Limitations
@@ -42,9 +42,9 @@ You do not need to redo any machine learning jobs after enabling hardware accele
 - The installed driver must be >= 535 (it must support CUDA 12.2).
 - On Linux (except for WSL2), you also need to have [NVIDIA Container Toolkit][nvct] installed.

-#### ROCM
+#### ROCm

-- The GPU must be supported by ROCM (or use `HSA_OVERRIDE_GFX_VERSION=<a supported version, ie 10.3.0>`)
+- The GPU must be supported by ROCm. If it isn't officially supported, you can attempt to use the `HSA_OVERRIDE_GFX_VERSION` environmental variable: `HSA_OVERRIDE_GFX_VERSION=<a supported version, e.g. 10.3.0>`.

 #### OpenVINO

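As a side note to the docs change above: a quick way to confirm, from inside the machine-learning container, that the override variable is visible and that the installed onnxruntime build actually ships the ROCm execution provider is a short probe like the sketch below. This is not part of the commit, and the `10.3.0` value is only the example used in the docs.

```python
import os

import onnxruntime as ort

# HSA_OVERRIDE_GFX_VERSION must already be set in the container environment
# (e.g. via docker compose) before the ROCm runtime initializes; 10.3.0 is
# just the example value from the docs for GPUs that are not officially supported.
print("HSA_OVERRIDE_GFX_VERSION:", os.environ.get("HSA_OVERRIDE_GFX_VERSION", "<not set>"))

# A ROCm-enabled onnxruntime build should list "ROCMExecutionProvider" here.
print("Available providers:", ort.get_available_providers())
```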
@@ -17,11 +17,11 @@ RUN mkdir /opt/armnn && \

 # Warning: 26.3Gb of disk space required to pull this image
 # https://github.com/microsoft/onnxruntime/blob/main/dockerfiles/Dockerfile.rocm
-FROM rocm/dev-ubuntu-22.04:6.1.2-complete as builder-rocm
+FROM rocm/dev-ubuntu-24.04:6.2.4-complete AS builder-rocm

 WORKDIR /code

-RUN apt-get update && apt-get install -y --no-install-recommends wget git python3.10-venv
+RUN apt-get update && apt-get install -y --no-install-recommends wget git python3.12-venv
 # Install same version as the Dockerfile provided by onnxruntime
 RUN wget -nv https://github.com/Kitware/CMake/releases/download/v3.27.3/cmake-3.27.3-linux-x86_64.sh && \
     chmod +x cmake-3.27.3-linux-x86_64.sh && \
@@ -32,21 +32,17 @@ RUN wget -nv https://github.com/Kitware/CMake/releases/download/v3.27.3/cmake-3.
 ENV PATH /code/cmake-3.27.3-linux-x86_64/bin:${PATH}

 # Prepare onnxruntime repository & build onnxruntime
-RUN git clone --single-branch --branch v1.18.1 --recursive "https://github.com/Microsoft/onnxruntime" onnxruntime
+# Note: cannot upgrade from 1.19.2 as of writing until upstream updates the ROCm CI
+RUN git clone --single-branch --branch v1.19.2 --recursive "https://github.com/Microsoft/onnxruntime" onnxruntime
 WORKDIR /code/onnxruntime
-# EDIT PR
-# While there's still this PR open, we need to compile on the branch of the PR
-# https://github.com/microsoft/onnxruntime/pull/19567
+# Fix for multi-threading based on comments in https://github.com/microsoft/onnxruntime/pull/19567
 COPY ./0001-fix-avoid-race-condition-for-rocm-conv-algo-caching.patch /tmp/
 RUN git apply /tmp/0001-fix-avoid-race-condition-for-rocm-conv-algo-caching.patch
-# END EDIT PR
 RUN /bin/sh ./dockerfiles/scripts/install_common_deps.sh
-# I ran into a compilation error when parallelizing the build
-# I used 12 threads to build onnxruntime, but it needs more than 16GB of RAM, and that's the amount of RAM I have on my machine
-# I lowered the number of threads to 8, and it worked
-# Even with 12 threads, the compilation took more than 1,5 hours to fail
-RUN ./build.sh --allow_running_as_root --config Release --build_wheel --update --build --parallel 9 --cmake_extra_defines\
-    ONNXRUNTIME_VERSION=1.18.1 --use_rocm --rocm_home=/opt/rocm
+# Note: the `parallel` setting uses a substantial amount of RAM
+RUN ./build.sh --allow_running_as_root --config Release --build_wheel --update --build --parallel 13 --cmake_extra_defines\
+    ONNXRUNTIME_VERSION=1.19.2 --use_rocm --rocm_home=/opt/rocm
 RUN mv /code/onnxruntime/build/Linux/Release/dist/*.whl /opt/

 FROM builder-${DEVICE} AS builder
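For reviewers who want to smoke-test the wheel produced by this builder stage, a minimal check along these lines should suffice (not part of the commit; it assumes the wheel from /opt has been installed into the Python 3.12 environment):

```python
import onnxruntime as ort

# The clone above checks out the v1.19.2 tag, so the wheel built from it
# should report that version and include the ROCm execution provider.
assert ort.__version__ == "1.19.2", f"unexpected version: {ort.__version__}"
assert "ROCMExecutionProvider" in ort.get_available_providers(), ort.get_available_providers()
print("onnxruntime", ort.__version__, "built with ROCm support")
```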
@@ -117,7 +113,7 @@ COPY --from=builder-armnn \
     /opt/ann/build.sh \
     /opt/armnn/

-FROM rocm/dev-ubuntu-22.04:6.1.2-complete AS prod-rocm
+FROM rocm/dev-ubuntu-24.04:6.2.4-complete AS prod-rocm


 FROM prod-${DEVICE} AS prod
@@ -63,7 +63,12 @@ _INSIGHTFACE_MODELS = {
 }


-SUPPORTED_PROVIDERS = ["CUDAExecutionProvider", "ROCMExecutionProvider", "OpenVINOExecutionProvider", "CPUExecutionProvider"]
+SUPPORTED_PROVIDERS = [
+    "CUDAExecutionProvider",
+    "ROCMExecutionProvider",
+    "OpenVINOExecutionProvider",
+    "CPUExecutionProvider",
+]


 def get_model_source(model_name: str) -> ModelSource | None:
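For context on why the list stays ordered from most to least preferred: code that consumes a preference list like this typically intersects it with what the installed onnxruntime build reports as available, so the CPU provider acts as the final fallback. The snippet below is only an illustration of that pattern, not the repository's actual selection logic.

```python
import onnxruntime as ort

SUPPORTED_PROVIDERS = [
    "CUDAExecutionProvider",
    "ROCMExecutionProvider",
    "OpenVINOExecutionProvider",
    "CPUExecutionProvider",
]


def pick_providers() -> list[str]:
    # Keep the preference order, but drop any provider the installed
    # onnxruntime build does not actually ship.
    available = set(ort.get_available_providers())
    return [p for p in SUPPORTED_PROVIDERS if p in available]


print(pick_providers())  # e.g. ['ROCMExecutionProvider', 'CPUExecutionProvider']
```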