Sort them alphabetically

yoni13 2025-01-13 01:38:14 +08:00
parent 68fccad462
commit 1775397a84
5 changed files with 32 additions and 41 deletions

View File

@@ -85,12 +85,12 @@ services:
     image: immich-machine-learning-dev:latest
     # extends:
     #   file: hwaccel.ml.yml
-    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
+    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl, rknn] for accelerated inference
     build:
       context: ../machine-learning
       dockerfile: Dockerfile
       args:
-        - DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
+        - DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl, rknn] for accelerated inference
     ports:
       - 3003:3003
     volumes:
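
Note: enabling one of these backends in the dev stack amounts to changing the `DEVICE` build arg and rebuilding. A minimal sketch, assuming the `immich-machine-learning` service name from the compose files in this commit (the exact invocation may differ per setup):

```bash
# Hypothetical: rebuild the dev machine-learning image with the RKNN backend.
# First change the build arg in the compose file to:
#   - DEVICE=rknn
docker compose build immich-machine-learning
docker compose up -d immich-machine-learning
```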

View File

@@ -29,12 +29,12 @@ services:
     image: immich-machine-learning:latest
     # extends:
     #   file: hwaccel.ml.yml
-    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
+    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl, rknn] for accelerated inference
     build:
       context: ../machine-learning
       dockerfile: Dockerfile
       args:
-        - DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
+        - DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl, rknn] for accelerated inference
     ports:
       - 3003:3003
     volumes:
@@ -68,22 +68,12 @@ services:
       - 5432:5432
     healthcheck:
       test: >-
-        pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
-        Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
-        --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
-        echo "checksum failure count is $$Chksum";
-        [ "$$Chksum" = '0' ] || exit 1
+        pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
       interval: 5m
       start_interval: 30s
       start_period: 5m
     command: >-
-      postgres
-      -c shared_preload_libraries=vectors.so
-      -c 'search_path="$$user", public, vectors'
-      -c logging_collector=on
-      -c max_wal_size=2GB
-      -c shared_buffers=512MB
-      -c wal_compression=on
+      postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
     restart: always
 
   # set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
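
Note: the collapsed one-liners above are behaviorally identical to the old multi-line form — YAML's `>-` folds newlines into spaces either way, so both variants hand the same string to the shell. Broken out for readability (with single `$` in place of compose's `$$` escaping), the healthcheck does roughly this:

```bash
# Sketch of what the postgres healthcheck runs inside the container.
pg_isready --dbname="${POSTGRES_DB}" --username="${POSTGRES_USER}" || exit 1
# Sum checksum failures across all databases; any non-zero count fails the check.
Chksum="$(psql --dbname="${POSTGRES_DB}" --username="${POSTGRES_USER}" \
  --tuples-only --no-align \
  --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"
echo "checksum failure count is $Chksum"
[ "$Chksum" = '0' ] || exit 1
```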
@@ -100,7 +90,7 @@ services:
   # add data source for http://immich-prometheus:9090 to get started
   immich-grafana:
     container_name: immich_grafana
-    command: ['./run.sh', '-disable-reporting']
+    command: [ './run.sh', '-disable-reporting' ]
     ports:
       - 3000:3000
     image: grafana/grafana:11.4.0-ubuntu@sha256:afccec22ba0e4815cca1d2bf3836e414322390dc78d77f1851976ffa8d61051c

View File

@@ -32,12 +32,12 @@ services:
   immich-machine-learning:
     container_name: immich_machine_learning
-    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
+    # For hardware acceleration, add one of -[armnn, cuda, openvino, rknn] to the image tag.
     # Example tag: ${IMMICH_VERSION:-release}-cuda
     image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
     # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
     #   file: hwaccel.ml.yml
-    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
     volumes:
       - model-cache:/cache
     env_file:
@@ -66,22 +66,12 @@ services:
       - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
     healthcheck:
       test: >-
-        pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
-        Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
-        --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
-        echo "checksum failure count is $$Chksum";
-        [ "$$Chksum" = '0' ] || exit 1
+        pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
       interval: 5m
       start_interval: 30s
       start_period: 5m
     command: >-
-      postgres
-      -c shared_preload_libraries=vectors.so
-      -c 'search_path="$$user", public, vectors'
-      -c logging_collector=on
-      -c max_wal_size=2GB
-      -c shared_buffers=512MB
-      -c wal_compression=on
+      postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
     restart: always
 
 volumes:

View File

@@ -13,6 +13,18 @@ services:
     volumes:
       - /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
       - /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)
 
+  rknn:
+    security_opt:
+      - systempaths=unconfined
+      - apparmor=unconfined
+    devices:
+      - /dev/rga:/dev/rga
+      - /dev/dri:/dev/dri
+      - /dev/dma_heap:/dev/dma_heap
+      - /dev/mpp_service:/dev/mpp_service
+    volumes:
+      - /sys/kernel/debug/:/sys/kernel/debug/:ro
+
   cpu: {}
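
With the new `rknn` preset in place, it is wired up the same way as the other presets — via the commented `extends` block in the main compose file. A sketch, assuming the release compose file from this commit (the `-rknn` tag suffix follows its hardware-acceleration comment):

```yaml
# Sketch: enabling the rknn preset in docker-compose.yml
services:
  immich-machine-learning:
    container_name: immich_machine_learning
    # -rknn appended to the image tag, per the comment in the compose file
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-rknn
    extends:
      file: hwaccel.ml.yml
      service: rknn
```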

View File

@@ -10,9 +10,9 @@ You do not need to redo any machine learning jobs after enabling hardware acceleration.
 ## Supported Backends
 
 - ARM NN (Mali)
-- RKNN (Rockchip)
 - CUDA (NVIDIA GPUs with [compute capability](https://developer.nvidia.com/cuda-gpus) 5.2 or higher)
 - OpenVINO (Intel discrete GPUs such as Iris Xe and Arc)
+- RKNN (Rockchip)
 
 ## Limitations
@@ -35,15 +35,6 @@ You do not need to redo any machine learning jobs after enabling hardware acceleration.
 - The `hwaccel.ml.yml` file assumes an additional file `/lib/firmware/mali_csffw.bin`, so update accordingly if your device's driver does not require this file
 - Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for ARM NN specific settings
 
-#### RKNN
-
-- You must have a supported Rockchip SoC; only RK3566 and RK3588 are supported at this moment.
-- Make sure you have the appropriate Linux kernel driver installed
-  - This is usually pre-installed on the device vendor's Linux images
-- RKNPU driver V0.9.8 or later must be available in the host server
-  - You may confirm this by running `cat /sys/kernel/debug/rknpu/version` to check the version
-- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for RKNN specific settings
-
 #### CUDA
 
 - The GPU must have compute capability 5.2 or greater.
@@ -56,6 +47,14 @@ You do not need to redo any machine learning jobs after enabling hardware acceleration.
 - The server must have a discrete GPU, i.e. Iris Xe or Arc. Expect issues when attempting to use integrated graphics.
 - Ensure the server's kernel version is new enough to use the device for hardware acceleration.
+
+#### RKNN
+
+- You must have a supported Rockchip SoC; only RK3566 and RK3588 are supported at this moment.
+- Make sure you have the appropriate Linux kernel driver installed
+  - This is usually pre-installed on the device vendor's Linux images
+- RKNPU driver V0.9.8 or later must be available in the host server
+  - You may confirm this by running `cat /sys/kernel/debug/rknpu/version` to check the version
+- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for RKNN specific settings
 
 ## Setup
 
 1. If you do not already have it, download the latest [`hwaccel.ml.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
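
As a quick sanity check of the RKNN prerequisites above, something like the following can be run on the host (paths as given in the requirements and in `hwaccel.ml.yml`; output format varies by driver, and reading `/sys/kernel/debug` typically requires root):

```bash
# Verify the RKNPU kernel driver is present and recent enough (V0.9.8 or later).
cat /sys/kernel/debug/rknpu/version
# Confirm the device nodes that the rknn preset maps into the container exist.
ls -l /dev/rga /dev/dri /dev/dma_heap /dev/mpp_service
```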