merge main

This commit is contained in:
Alex 2025-03-18 11:51:00 -05:00
commit 0ae879e597
No known key found for this signature in database
GPG Key ID: 53CD082B3A5E1082
201 changed files with 6377 additions and 8626 deletions

View File

@ -1,4 +1,4 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:2ef23730ec68d8511ec8e6e0b82550ca728b256805d81f60ed890f3bfb21cfb9
FROM ${BASEIMAGE}
# Flutter SDK

View File

@ -22,9 +22,9 @@ jobs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
mobile:
@ -51,18 +51,18 @@ jobs:
ref="${input_ref:-$github_ref}"
echo "ref=$ref" >> $GITHUB_OUTPUT
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
ref: ${{ steps.get-ref.outputs.ref }}
- uses: actions/setup-java@v4
- uses: actions/setup-java@3a4f6e1af504cf6a31855fa899c6aa5355ba6c12 # v4
with:
distribution: 'zulu'
java-version: '17'
cache: 'gradle'
- name: Setup Flutter SDK
uses: subosito/flutter-action@v2
uses: subosito/flutter-action@44ac965b96f18d999802d4b807e3256d5a3f9fa1 # v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
@ -89,7 +89,7 @@ jobs:
flutter build apk --release --split-per-abi --target-platform android-arm,android-arm64,android-x64
- name: Publish Android Artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4
with:
name: release-apk-signed
path: mobile/build/app/outputs/flutter-apk/*.apk

View File

@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Cleanup
run: |

View File

@ -29,9 +29,9 @@ jobs:
working-directory: ./cli
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@v4
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './cli/.nvmrc'
registry-url: 'https://registry.npmjs.org'
@ -53,16 +53,16 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
@ -77,7 +77,7 @@ jobs:
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@v5
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
with:
flavor: |
latest=false
@ -88,7 +88,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@v6.15.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64

View File

@ -42,11 +42,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@ -60,7 +60,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
uses: github/codeql-action/autobuild@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@ -73,6 +73,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3
with:
category: "/language:${{matrix.language}}"

View File

@ -23,9 +23,9 @@ jobs:
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
server:
@ -49,7 +49,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ["", "-cuda", "-openvino", "-armnn"]
suffix: ['', '-cuda', '-rocm', '-openvino', '-armnn', '-rknn']
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@ -66,6 +66,21 @@ jobs:
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
retag_server:
name: Re-Tag Server
@ -74,10 +89,10 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
suffix: [""]
suffix: ['']
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@ -120,6 +135,11 @@ jobs:
device: cuda
suffix: -cuda
- platform: linux/amd64
runner: mich
device: rocm
suffix: -rocm
- platform: linux/amd64
runner: ubuntu-latest
device: openvino
@ -130,6 +150,11 @@ jobs:
device: armnn
suffix: -armnn
- platform: linux/arm64
runner: ubuntu-24.04-arm
device: rknn
suffix: -rknn
steps:
- name: Prepare
run: |
@ -137,13 +162,13 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
@ -170,7 +195,7 @@ jobs:
- name: Build and push image
id: build
uses: docker/build-push-action@v6.15.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
@ -195,7 +220,7 @@ jobs:
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4
with:
name: ml-digests-${{ matrix.device }}-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
@ -215,15 +240,19 @@ jobs:
- device: cpu
- device: cuda
suffix: -cuda
- device: rocm
suffix: -rocm
- device: openvino
suffix: -openvino
- device: armnn
suffix: -armnn
- device: rknn
suffix: -rknn
needs:
- build_and_push_ml
steps:
- name: Download digests
uses: actions/download-artifact@v4
uses: actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806 # v4
with:
path: ${{ runner.temp }}/digests
pattern: ml-digests-${{ matrix.device }}-*
@ -231,26 +260,26 @@ jobs:
- name: Login to Docker Hub
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: "true"
DOCKER_METADATA_PR_HEAD_SHA: 'true'
with:
flavor: |
# Disable latest tag
@ -301,13 +330,13 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
@ -334,7 +363,7 @@ jobs:
- name: Build and push image
id: build
uses: docker/build-push-action@v6.15.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
@ -359,7 +388,7 @@ jobs:
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4
with:
name: server-digests-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
@ -377,7 +406,7 @@ jobs:
- build_and_push_server
steps:
- name: Download digests
uses: actions/download-artifact@v4
uses: actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806 # v4
with:
path: ${{ runner.temp }}/digests
pattern: server-digests-*
@ -385,26 +414,26 @@ jobs:
- name: Login to Docker Hub
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: "true"
DOCKER_METADATA_PR_HEAD_SHA: 'true'
with:
flavor: |
# Disable latest tag

View File

@ -18,9 +18,9 @@ jobs:
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
docs:
@ -42,10 +42,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './docs/.nvmrc'
@ -59,7 +59,7 @@ jobs:
run: npm run build
- name: Upload build output
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4
with:
name: docs-build-output
path: docs/build/

View File

@ -17,7 +17,7 @@ jobs:
run: echo 'The triggering workflow did not succeed' && exit 1
- name: Get artifact
id: get-artifact
uses: actions/github-script@v7
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
@ -35,7 +35,7 @@ jobs:
return { found: true, id: matchArtifact.id };
- name: Determine deploy parameters
id: parameters
uses: actions/github-script@v7
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
const eventType = context.payload.workflow_run.event;
@ -98,11 +98,11 @@ jobs:
if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Load parameters
id: parameters
uses: actions/github-script@v7
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
const json = `${{ needs.checks.outputs.parameters }}`;
@ -115,7 +115,7 @@ jobs:
echo "Starting docs deployment for ${{ steps.parameters.outputs.event }} ${{ steps.parameters.outputs.name }}"
- name: Download artifact
uses: actions/github-script@v7
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
let artifact = ${{ needs.checks.outputs.artifact }};
@ -138,7 +138,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@v2
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: "0.58.12"
tofu_version: "1.7.1"
@ -153,7 +153,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@v2
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: "0.58.12"
tofu_version: "1.7.1"
@ -167,7 +167,7 @@ jobs:
echo "output=$TG_OUT" >> $GITHUB_OUTPUT
- name: Publish to Cloudflare Pages
uses: cloudflare/pages-action@v1
uses: cloudflare/pages-action@f0a1cd58cd66095dee69bfa18fa5efd1dde93bca # v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN_PAGES_UPLOAD }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
@ -184,7 +184,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@v2
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
@ -192,7 +192,7 @@ jobs:
tg_command: 'apply'
- name: Comment
uses: actions-cool/maintain-one-comment@v3
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
if: ${{ steps.parameters.outputs.event == 'pr' }}
with:
number: ${{ fromJson(needs.checks.outputs.parameters).pr_number }}

View File

@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Destroy Docs Subdomain
env:
@ -18,7 +18,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@v2
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: "0.58.12"
tofu_version: "1.7.1"
@ -26,7 +26,7 @@ jobs:
tg_command: "destroy -refresh=false"
- name: Comment
uses: actions-cool/maintain-one-comment@v3
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
with:
number: ${{ github.event.number }}
delete: true

View File

@ -13,19 +13,19 @@ jobs:
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './server/.nvmrc'
@ -33,13 +33,13 @@ jobs:
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@v9
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@v7
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
if: always()
with:
script: |

View File

@ -12,7 +12,7 @@ jobs:
pull-requests: write
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@v5
uses: mheap/github-action-required-labels@388fd6af37b34cdfe5a23b37060e763217e58b03 # v5
with:
mode: exactly
count: 1

View File

@ -9,4 +9,4 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5

View File

@ -31,25 +31,25 @@ jobs:
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
token: ${{ steps.generate-token.outputs.token }}
- name: Install uv
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@f94ec6bedd8674c4426838e6b50417d36b6ab231 # v5
- name: Bump version
run: misc/release/pump-version.sh -s "${{ inputs.serverBump }}" -m "${{ inputs.mobileBump }}"
- name: Commit and tag
id: push-tag
uses: EndBug/add-and-commit@v9
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
with:
default_author: github_actions
message: 'chore: version ${{ env.IMMICH_VERSION }}'
@ -70,23 +70,23 @@ jobs:
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
token: ${{ steps.generate-token.outputs.token }}
- name: Download APK
uses: actions/download-artifact@v4
uses: actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806 # v4
with:
name: release-apk-signed
- name: Create draft release
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2
with:
draft: true
tag_name: ${{ env.IMMICH_VERSION }}

View File

@ -11,7 +11,7 @@ jobs:
permissions:
pull-requests: write
steps:
- uses: mshick/add-pr-comment@v2
- uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2
with:
message-id: "preview-status"
message: "Deploying preview environment to https://pr-${{ github.event.pull_request.number }}.preview.internal.immich.cloud/"
@ -22,7 +22,7 @@ jobs:
permissions:
pull-requests: write
steps:
- uses: actions/github-script@v7
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
github.rest.issues.removeLabel({

View File

@ -15,9 +15,9 @@ jobs:
run:
working-directory: ./open-api/typescript-sdk
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@v4
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './open-api/typescript-sdk/.nvmrc'
registry-url: 'https://registry.npmjs.org'

View File

@ -16,9 +16,9 @@ jobs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
mobile:
@ -38,10 +38,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Flutter SDK
uses: subosito/flutter-action@v2
uses: subosito/flutter-action@44ac965b96f18d999802d4b807e3256d5a3f9fa1 # v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
@ -55,7 +55,7 @@ jobs:
working-directory: ./mobile
- name: Find file changes
uses: tj-actions/verify-changed-files@v20
uses: tj-actions/verify-changed-files@6ed7632824d235029086612d4330d659005af687 # v20
id: verify-changed-files
with:
files: |

View File

@ -23,9 +23,9 @@ jobs:
should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
web:
@ -61,10 +61,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './server/.nvmrc'
@ -98,10 +98,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './cli/.nvmrc'
@ -139,10 +139,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './cli/.nvmrc'
@ -173,10 +173,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './web/.nvmrc'
@ -218,10 +218,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './e2e/.nvmrc'
@ -257,10 +257,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './server/.nvmrc'
@ -282,12 +282,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './e2e/.nvmrc'
@ -324,12 +324,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './e2e/.nvmrc'
@ -360,9 +360,9 @@ jobs:
if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Flutter SDK
uses: subosito/flutter-action@v2
uses: subosito/flutter-action@44ac965b96f18d999802d4b807e3256d5a3f9fa1 # v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
@ -379,10 +379,10 @@ jobs:
run:
working-directory: ./machine-learning
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Install uv
uses: astral-sh/setup-uv@v5
- uses: actions/setup-python@v5
uses: astral-sh/setup-uv@f94ec6bedd8674c4426838e6b50417d36b6ab231 # v5
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5
# TODO: add caching when supported (https://github.com/actions/setup-python/pull/818)
# with:
# python-version: 3.11
@ -407,7 +407,7 @@ jobs:
name: ShellCheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@master
with:
@ -421,10 +421,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './server/.nvmrc'
@ -438,7 +438,7 @@ jobs:
run: make open-api
- name: Find file changes
uses: tj-actions/verify-changed-files@v20
uses: tj-actions/verify-changed-files@6ed7632824d235029086612d4330d659005af687 # v20
id: verify-changed-files
with:
files: |
@ -476,10 +476,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@v4
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './server/.nvmrc'
@ -500,7 +500,7 @@ jobs:
run: npm run typeorm:migrations:generate ./src/migrations/TestMigration
- name: Find file changes
uses: tj-actions/verify-changed-files@v20
uses: tj-actions/verify-changed-files@6ed7632824d235029086612d4330d659005af687 # v20
id: verify-changed-files
with:
files: |
@ -519,7 +519,7 @@ jobs:
DB_URL: postgres://postgres:postgres@localhost:5432/immich
- name: Find file changes
uses: tj-actions/verify-changed-files@v20
uses: tj-actions/verify-changed-files@6ed7632824d235029086612d4330d659005af687 # v20
id: verify-changed-sql-files
with:
files: |

View File

@ -11,9 +11,9 @@ jobs:
should_run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
i18n:
@ -36,7 +36,7 @@ jobs:
exit 1
fi
- name: Find Pull Request
uses: juliangruber/find-pull-request-action@v1
uses: juliangruber/find-pull-request-action@48b6133aa6c826f267ebd33aa2d29470f9d9e7d0 # v1
id: find-pr
with:
branch: chore/translations

cli/package-lock.json generated
View File

@ -27,7 +27,7 @@
"@types/lodash-es": "^4.17.12",
"@types/micromatch": "^4.0.9",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.13.9",
"@types/node": "^22.13.10",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
@ -62,7 +62,7 @@
"@oazapfts/runtime": "^1.0.2"
},
"devDependencies": {
"@types/node": "^22.13.9",
"@types/node": "^22.13.10",
"typescript": "^5.3.3"
}
},

View File

@ -21,7 +21,7 @@
"@types/lodash-es": "^4.17.12",
"@types/micromatch": "^4.0.9",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.13.9",
"@types/node": "^22.13.10",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",

View File

@ -95,12 +95,12 @@ services:
image: immich-machine-learning-dev:latest
# extends:
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
build:
context: ../machine-learning
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
- DEVICE=cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
ports:
- 3003:3003
volumes:

View File

@ -38,12 +38,12 @@ services:
image: immich-machine-learning:latest
# extends:
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
build:
context: ../machine-learning
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
- DEVICE=cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
ports:
- 3003:3003
volumes:
@ -77,22 +77,12 @@ services:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
@ -109,7 +99,7 @@ services:
# add data source for http://immich-prometheus:9090 to get started
immich-grafana:
container_name: immich_grafana
command: ['./run.sh', '-disable-reporting']
command: [ './run.sh', '-disable-reporting' ]
ports:
- 3000:3000
image: grafana/grafana:11.5.2-ubuntu@sha256:8b5858c447e06fd7a89006b562ba7bba7c4d5813600c7982374c41852adefaeb

View File

@ -33,12 +33,12 @@ services:
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
@ -67,22 +67,12 @@ services:
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
restart: always
volumes:

View File

@ -14,6 +14,13 @@ services:
- /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
- /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)
rknn:
security_opt:
- systempaths=unconfined
- apparmor=unconfined
devices:
- /dev/dri:/dev/dri
cpu: {}
cuda:
@ -26,6 +33,13 @@ services:
capabilities:
- gpu
rocm:
group_add:
- video
devices:
- /dev/dri:/dev/dri
- /dev/kfd:/dev/kfd
openvino:
device_cgroup_rules:
- 'c 189:* rmw'

View File

@ -11,6 +11,7 @@ The `immich-server` docker image comes preinstalled with an administrative CLI (
| `enable-oauth-login` | Enable OAuth login |
| `disable-oauth-login` | Disable OAuth login |
| `list-users` | List Immich users |
| `version` | Print Immich version |
## How to run a command
@ -80,3 +81,10 @@ immich-admin list-users
}
]
```
Print Immich Version
```
immich-admin version
v1.129.0
```

View File

@ -11,7 +11,9 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- ARM NN (Mali)
- CUDA (NVIDIA GPUs with [compute capability](https://developer.nvidia.com/cuda-gpus) 5.2 or higher)
- ROCm (AMD GPUs)
- OpenVINO (Intel GPUs such as Iris Xe and Arc)
- RKNN (Rockchip)
## Limitations
@ -19,6 +21,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- Only Linux and Windows (through WSL2) servers are supported.
- ARM NN is only supported on devices with Mali GPUs. Other Arm devices are not supported.
- Some models may not be compatible with certain backends. CUDA is the most reliable.
- Search latency isn't improved by ARM NN because model compatibility issues prevent its use there. However, smart search jobs do make use of ARM NN.
## Prerequisites
@ -33,6 +36,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- The `hwaccel.ml.yml` file assumes the path to it is `/usr/lib/libmali.so`, so update accordingly if it is elsewhere
- The `hwaccel.ml.yml` file assumes an additional file `/lib/firmware/mali_csffw.bin`, so update accordingly if your device's driver does not require this file
- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for ARM NN specific settings
  - In particular, the `MACHINE_LEARNING_ANN_FP16_TURBO` setting can significantly improve performance at the cost of very slightly lower accuracy
#### CUDA
@ -41,22 +45,38 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- The installed driver must be >= 535 (it must support CUDA 12.2).
- On Linux (except for WSL2), you also need to have [NVIDIA Container Toolkit][nvct] installed.
#### ROCm
- The GPU must be supported by ROCm. If it isn't officially supported, you can attempt to use the `HSA_OVERRIDE_GFX_VERSION` environment variable: `HSA_OVERRIDE_GFX_VERSION=<a supported version, e.g. 10.3.0>`. If this doesn't work, you might need to also set `HSA_USE_SVM=0`.
- The ROCm image is quite large and requires at least 35GiB of free disk space. However, pulling later updates to the service through Docker will generally only amount to a few hundred megabytes as the rest will be cached.
- This backend is new and may experience some issues. For example, GPU power consumption can be higher than usual after running inference, even if the machine learning service is idle. In this case, it will only go back to normal after being idle for 5 minutes (configurable with the [MACHINE_LEARNING_MODEL_TTL](/docs/install/environment-variables) setting).
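A minimal sketch of where these variables could go, assuming the standard Compose setup (the value shown is only an example and must match your GPU):

```yaml
services:
  immich-machine-learning:
    environment:
      # example override; use a GFX version your ROCm build supports
      - HSA_OVERRIDE_GFX_VERSION=10.3.0
      # only add this if the override alone doesn't work
      - HSA_USE_SVM=0
```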
#### OpenVINO
- Integrated GPUs are more likely to experience issues than discrete GPUs, especially for older processors or servers with low RAM.
- Ensure the server's kernel version is new enough to use the device for hardware acceleration.
- Expect higher RAM usage when using OpenVINO compared to CPU processing.
#### RKNN
- You must have a supported Rockchip SoC: only RK3566, RK3568, RK3576 and RK3588 are supported at the moment.
- Make sure you have the appropriate Linux kernel driver installed
  - This is usually pre-installed on the device vendor's Linux images
- RKNPU driver V0.9.8 or later must be available on the host server
  - You can confirm this by running `cat /sys/kernel/debug/rknpu/version`
- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for RKNN specific settings
  - In particular, setting `MACHINE_LEARNING_RKNN_THREADS` to 2 or 3 can _dramatically_ improve performance for RK3576 and RK3588 compared to the default of 1, at the expense of multiplying the amount of RAM each model uses by that amount.
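As a rough sketch, assuming the variable is set directly on the machine-learning service (an `.env` entry works equally well):

```yaml
services:
  immich-machine-learning:
    environment:
      # 2 or 3 can dramatically improve throughput on RK3576/RK3588,
      # at the cost of multiplying each model's RAM usage by the same factor
      - MACHINE_LEARNING_RKNN_THREADS=3
```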
## Setup
1. If you do not already have it, download the latest [`hwaccel.ml.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
2. In the `docker-compose.yml` under `immich-machine-learning`, uncomment the `extends` section and change `cpu` to the appropriate backend.
3. Still in `immich-machine-learning`, add one of -[armnn, cuda, openvino] to the `image` section's tag at the end of the line.
3. Still in `immich-machine-learning`, add one of -[armnn, cuda, rocm, openvino] to the `image` section's tag at the end of the line.
4. Redeploy the `immich-machine-learning` container with these updated settings.
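Putting steps 2 and 3 together, the relevant part of the `docker-compose.yml` might look like the following sketch, with CUDA as the example backend:

```yaml
services:
  immich-machine-learning:
    # the tag suffix selects the backend: -armnn, -cuda, -rocm or -openvino
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
    extends:
      file: hwaccel.ml.yml
      service: cuda # must match the backend chosen for the image tag
```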
### Confirming Device Usage
You can confirm the device is being recognized and used by checking its utilization. There are many tools to display this, such as `nvtop` for NVIDIA or Intel and `intel_gpu_top` for Intel.
You can confirm the device is being recognized and used by checking its utilization. There are many tools to display this, such as `nvtop` for NVIDIA or Intel, `intel_gpu_top` for Intel, and `radeontop` for AMD.
You can also check the logs of the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, or when you search with text in Immich, you should either see a log for `Available ORT providers` containing the relevant provider (e.g. `CUDAExecutionProvider` in the case of CUDA), or a `Loaded ANN model` log entry without errors in the case of ARM NN.
@ -127,3 +147,12 @@ Note that you should increase job concurrencies to increase overall utilization
- If you encounter an error when a model is running, try a different model to see if the issue is model-specific.
- You may want to increase concurrency past the default for higher utilization. However, keep in mind that this will also increase VRAM consumption.
- Larger models benefit more from hardware acceleration, if you have the VRAM for them.
- Compared to ARM NN, RKNPU has:
- Wider model support (including for search, which ARM NN does not accelerate)
- Less heat generation
- Very slightly lower accuracy (RKNPU always uses FP16, while ARM NN by default uses higher precision FP32 unless `MACHINE_LEARNING_ANN_FP16_TURBO` is enabled)
- Varying speed (tested on RK3588):
- If `MACHINE_LEARNING_RKNN_THREADS` is at the default of 1, RKNPU will have substantially lower throughput for ML jobs than ARM NN in most cases, but similar latency (such as when searching)
- If `MACHINE_LEARNING_RKNN_THREADS` is set to 3, it will be somewhat faster than ARM NN at FP32, but somewhat slower than ARM NN if `MACHINE_LEARNING_ANN_FP16_TURBO` is enabled
- When other tasks also use the GPU (like transcoding), RKNPU has a significant advantage over ARM NN as it uses the otherwise idle NPU instead of competing for GPU usage
- Lower RAM usage if `MACHINE_LEARNING_RKNN_THREADS` is at the default of 1, but significantly higher if greater than 1 (which is necessary for it to fully utilize the NPU and hence be comparable in speed to ARM NN)

View File

@ -23,12 +23,12 @@ name: immich_remote_ml
services:
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.ml.yml
# service: # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
# service: # set to one of [armnn, cuda, rocm, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
restart: always

View File

@ -170,6 +170,8 @@ Redis (Sentinel) URL example JSON before encoding:
| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
| `MACHINE_LEARNING_PING_TIMEOUT` | How long (ms) to wait for a PING response when checking if an ML server is available | `2000` | server |
| `MACHINE_LEARNING_AVAILABILITY_BACKOFF_TIME` | How long to ignore ML servers that are offline before trying again | `30000` | server |
| `MACHINE_LEARNING_RKNN` | Enable RKNN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_RKNN_THREADS` | How many RKNN runtime threads to spin up for inference | `1` | machine learning |
\*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.

e2e/package-lock.json generated
View File

@ -15,7 +15,7 @@
"@immich/sdk": "file:../open-api/typescript-sdk",
"@playwright/test": "^1.44.1",
"@types/luxon": "^3.4.2",
"@types/node": "^22.13.9",
"@types/node": "^22.13.10",
"@types/oidc-provider": "^8.5.1",
"@types/pg": "^8.11.0",
"@types/pngjs": "^6.0.4",
@ -67,7 +67,7 @@
"@types/lodash-es": "^4.17.12",
"@types/micromatch": "^4.0.9",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.13.9",
"@types/node": "^22.13.10",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
@ -102,7 +102,7 @@
"@oazapfts/runtime": "^1.0.2"
},
"devDependencies": {
"@types/node": "^22.13.9",
"@types/node": "^22.13.10",
"typescript": "^5.3.3"
}
},

View File

@ -25,7 +25,7 @@
"@immich/sdk": "file:../open-api/typescript-sdk",
"@playwright/test": "^1.44.1",
"@types/luxon": "^3.4.2",
"@types/node": "^22.13.9",
"@types/node": "^22.13.10",
"@types/oidc-provider": "^8.5.1",
"@types/pg": "^8.11.0",
"@types/pngjs": "^6.0.4",

View File

@ -45,7 +45,7 @@ test.describe('Shared Links', () => {
await page.goto(`/share/${sharedLink.key}`);
await page.getByRole('heading', { name: 'Test Album' }).waitFor();
await page.locator(`[data-asset-id="${asset.id}"]`).hover();
await page.waitForSelector('#asset-group-by-date svg');
await page.waitForSelector('[data-group] svg');
await page.getByRole('checkbox').click();
await page.getByRole('button', { name: 'Download' }).click();
await page.getByText('DOWNLOADING', { exact: true }).waitFor();

View File

@ -1082,7 +1082,9 @@
"remove_url": "Remove URL",
"remove_user": "Remove user",
"removed_api_key": "Removed API Key: {name}",
"remove_memory": "Remove memory",
"removed_memory": "Removed memory",
"remove_photo_from_memory": "Remove photo from this memory",
"removed_photo_from_memory": "Removed photo from memory",
"removed_from_archive": "Removed from archive",
"removed_from_favorites": "Removed from favorites",

View File

@ -1,5 +1,24 @@
*.zip
*.onnx
*.rknn
*.npy
*_attr__value
*.weight
*.bias
onnx__*
*in_proj_bias
*.proj
*.latent
*.pos_embed
vocab.txt
export/immich_model_exporter/models/**/README.md
tokenizer.json
tokenizer_config.json
special_tokens_map.json
preprocess_cfg.json
config.json
merges.txt
vocab.json
upload/
venv/
__pycache__/

View File

@ -15,6 +15,36 @@ RUN mkdir /opt/armnn && \
cd /opt/ann && \
sh build.sh
FROM builder-cpu AS builder-rknn
# Warning: 25GiB+ disk space required to pull this image
# TODO: find a way to reduce the image size
FROM rocm/dev-ubuntu-22.04:6.3.4-complete AS builder-rocm
WORKDIR /code
RUN apt-get update && apt-get install -y --no-install-recommends wget git python3.10-venv
RUN wget -nv https://github.com/Kitware/CMake/releases/download/v3.30.1/cmake-3.30.1-linux-x86_64.sh && \
chmod +x cmake-3.30.1-linux-x86_64.sh && \
mkdir -p /code/cmake-3.30.1-linux-x86_64 && \
./cmake-3.30.1-linux-x86_64.sh --skip-license --prefix=/code/cmake-3.30.1-linux-x86_64 && \
rm cmake-3.30.1-linux-x86_64.sh
ENV PATH=/code/cmake-3.30.1-linux-x86_64/bin:${PATH}
RUN git clone --single-branch --branch v1.20.1 --recursive "https://github.com/Microsoft/onnxruntime" onnxruntime
WORKDIR /code/onnxruntime
# Fix for multi-threading based on comments in https://github.com/microsoft/onnxruntime/pull/19567
# TODO: find a way to fix this without disabling algo caching
COPY ./patches/* /tmp/
RUN git apply /tmp/*.patch
RUN /bin/sh ./dockerfiles/scripts/install_common_deps.sh
# Note: the `parallel` setting uses a substantial amount of RAM
RUN ./build.sh --allow_running_as_root --config Release --build_wheel --update --build --parallel 17 --cmake_extra_defines\
ONNXRUNTIME_VERSION=1.20.1 --skip_tests --use_rocm --rocm_home=/opt/rocm
RUN mv /code/onnxruntime/build/Linux/Release/dist/*.whl /opt/
FROM builder-${DEVICE} AS builder
ARG DEVICE
@ -30,6 +60,9 @@ RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=uv.lock,target=uv.lock \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
uv sync --frozen --extra ${DEVICE} --no-dev --no-editable --no-install-project --compile-bytecode --no-progress --active --link-mode copy
RUN if [ "$DEVICE" = "rocm" ]; then \
uv pip install /opt/onnxruntime_rocm-*.whl; \
fi
FROM python:3.11-slim-bookworm@sha256:614c8691ab74150465ec9123378cd4dde7a6e57be9e558c3108df40664667a4c AS prod-cpu
@ -37,10 +70,10 @@ FROM prod-cpu AS prod-openvino
RUN apt-get update && \
apt-get install --no-install-recommends -yqq ocl-icd-libopencl1 wget && \
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17384.11/intel-igc-core_1.0.17384.11_amd64.deb && \
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17384.11/intel-igc-opencl_1.0.17384.11_amd64.deb && \
wget https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/intel-opencl-icd_24.31.30508.7_amd64.deb && \
wget https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/libigdgmm12_22.4.1_amd64.deb && \
wget -nv https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17384.11/intel-igc-core_1.0.17384.11_amd64.deb && \
wget -nv https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17384.11/intel-igc-opencl_1.0.17384.11_amd64.deb && \
wget -nv https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/intel-opencl-icd_24.31.30508.7_amd64.deb && \
wget -nv https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/libigdgmm12_22.4.1_amd64.deb && \
dpkg -i *.deb && \
rm *.deb && \
apt-get remove wget -yqq && \
@ -57,6 +90,8 @@ COPY --from=builder-cuda /usr/local/bin/python3 /usr/local/bin/python3
COPY --from=builder-cuda /usr/local/lib/python3.11 /usr/local/lib/python3.11
COPY --from=builder-cuda /usr/local/lib/libpython3.11.so /usr/local/lib/libpython3.11.so
FROM rocm/dev-ubuntu-22.04:6.3.4-complete AS prod-rocm
FROM prod-cpu AS prod-armnn
ENV LD_LIBRARY_PATH=/opt/armnn
@ -77,11 +112,14 @@ COPY --from=builder-armnn \
/opt/ann/build.sh \
/opt/armnn/
FROM prod-cpu AS prod-rknn
FROM prod-${DEVICE} AS prod
ARG DEVICE
RUN apt-get update && \
apt-get install -y --no-install-recommends tini $(if ! [ "$DEVICE" = "openvino" ]; then echo "libmimalloc2.0"; fi) && \
apt-get install -y --no-install-recommends tini $(if ! [ "$DEVICE" = "openvino" ] && ! [ "$DEVICE" = "rocm" ]; then echo "libmimalloc2.0"; fi) && \
apt-get autoremove -yqq && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

View File

@ -7,7 +7,7 @@
This project uses [uv](https://docs.astral.sh/uv/getting-started/installation/), so be sure to install it first.
Running `uv sync --extra cpu` will install everything you need in an isolated virtual environment.
CUDA and OpenVINO are supported as acceleration APIs. To use them, you can replace `--group cpu` with either of `--group cuda` or `--group openvino`. In the case of CUDA, a [compute capability](https://developer.nvidia.com/cuda-gpus) of 5.2 or higher is required.
CUDA, ROCm, and OpenVINO are supported as acceleration APIs. To use them, replace `--extra cpu` with one of `--extra cuda`, `--extra rocm`, or `--extra openvino`. In the case of CUDA, a [compute capability](https://developer.nvidia.com/cuda-gpus) of 5.2 or higher is required.
To add or remove dependencies, you can use the commands `uv add $PACKAGE_NAME` and `uv remove $PACKAGE_NAME`, respectively.
Be sure to commit the `uv.lock` and `pyproject.toml` files with `uv lock` to reflect any changes in dependencies.

View File

@ -64,6 +64,8 @@ class Settings(BaseSettings):
ann: bool = True
ann_fp16_turbo: bool = False
ann_tuning_level: int = 2
rknn: bool = True
rknn_threads: int = 1
preload: PreloadModelData | None = None
max_batch_size: MaxBatchSize | None = None

View File

@ -136,6 +136,12 @@ def ann_session() -> Iterator[mock.Mock]:
yield mocked
@pytest.fixture(scope="function")
def rknn_session() -> Iterator[mock.Mock]:
with mock.patch("app.sessions.rknn.RknnPoolExecutor") as mocked:
yield mocked
@pytest.fixture(scope="function")
def rmtree() -> Iterator[mock.Mock]:
with mock.patch("app.models.base.rmtree", autospec=True) as mocked:

View File

@ -226,9 +226,9 @@ async def load(model: InferenceModel) -> InferenceModel:
except FileNotFoundError as e:
if model.model_format == ModelFormat.ONNX:
raise e
log.exception(e)
log.warning(
f"{model.model_format.upper()} is available, but model '{model.model_name}' does not support it."
f"{model.model_format.upper()} is available, but model '{model.model_name}' does not support it.",
exc_info=e,
)
model.model_format = ModelFormat.ONNX
model.load()

View File

@ -8,6 +8,7 @@ from typing import Any, ClassVar
from huggingface_hub import snapshot_download
import ann.ann
import app.sessions.rknn as rknn
from app.sessions.ort import OrtSession
from ..config import clean_name, log, settings
@ -66,12 +67,17 @@ class InferenceModel(ABC):
pass
def _download(self) -> None:
ignore_patterns = [] if self.model_format == ModelFormat.ARMNN else ["*.armnn"]
ignored_patterns: dict[ModelFormat, list[str]] = {
ModelFormat.ONNX: ["*.armnn", "*.rknn"],
ModelFormat.ARMNN: ["*.rknn"],
ModelFormat.RKNN: ["*.armnn"],
}
snapshot_download(
f"immich-app/{clean_name(self.model_name)}",
cache_dir=self.cache_dir,
local_dir=self.cache_dir,
ignore_patterns=ignore_patterns,
ignore_patterns=ignored_patterns.get(self.model_format, []),
)
def _load(self) -> ModelSession:
@ -108,17 +114,25 @@ class InferenceModel(ABC):
session: ModelSession = AnnSession(model_path)
case ".onnx":
session = OrtSession(model_path)
case ".rknn":
session = rknn.RknnSession(model_path)
case _:
raise ValueError(f"Unsupported model file type: {model_path.suffix}")
return session
def model_path_for_format(self, model_format: ModelFormat) -> Path:
model_path_prefix = rknn.model_prefix if model_format == ModelFormat.RKNN else None
if model_path_prefix:
return self.model_dir / model_path_prefix / f"model.{model_format}"
return self.model_dir / f"model.{model_format}"
@property
def model_dir(self) -> Path:
return self.cache_dir / self.model_type.value
@property
def model_path(self) -> Path:
return self.model_dir / f"model.{self.model_format}"
return self.model_path_for_format(self.model_format)
@property
def model_task(self) -> ModelTask:
@ -155,4 +169,9 @@ class InferenceModel(ABC):
@property
def _model_format_default(self) -> ModelFormat:
return ModelFormat.ARMNN if ann.ann.is_available and settings.ann else ModelFormat.ONNX
if rknn.is_available:
return ModelFormat.RKNN
elif ann.ann.is_available and settings.ann:
return ModelFormat.ARMNN
else:
return ModelFormat.ONNX

View File

@ -44,6 +44,18 @@ _OPENCLIP_MODELS = {
"nllb-clip-base-siglip__v1",
"nllb-clip-large-siglip__mrl",
"nllb-clip-large-siglip__v1",
"ViT-B-16-SigLIP2__webli",
"ViT-B-32-SigLIP2-256__webli",
"ViT-L-16-SigLIP2-256__webli",
"ViT-L-16-SigLIP2-384__webli",
"ViT-L-16-SigLIP2-512__webli",
"ViT-SO400M-14-SigLIP2-378__webli",
"ViT-SO400M-14-SigLIP2__webli",
"ViT-SO400M-16-SigLIP2-256__webli",
"ViT-SO400M-16-SigLIP2-384__webli",
"ViT-SO400M-16-SigLIP2-512__webli",
"ViT-gopt-16-SigLIP2-256__webli",
"ViT-gopt-16-SigLIP2-384__webli",
}
@ -63,7 +75,15 @@ _INSIGHTFACE_MODELS = {
}
SUPPORTED_PROVIDERS = ["CUDAExecutionProvider", "OpenVINOExecutionProvider", "CPUExecutionProvider"]
SUPPORTED_PROVIDERS = [
"CUDAExecutionProvider",
"ROCMExecutionProvider",
"OpenVINOExecutionProvider",
"CPUExecutionProvider",
]
RKNN_SUPPORTED_SOCS = ["rk3566", "rk3568", "rk3576", "rk3588"]
RKNN_COREMASK_SUPPORTED_SOCS = ["rk3576", "rk3588"]
def get_model_source(model_name: str) -> ModelSource | None:

View File

@ -31,7 +31,7 @@ class FaceRecognizer(InferenceModel):
self._add_batch_axis(self.model_path)
session = self._make_session(self.model_path)
self.model = ArcFaceONNX(
self.model_path.with_suffix(".onnx").as_posix(),
self.model_path_for_format(ModelFormat.ONNX).as_posix(),
session=session,
)
return session

View File

@ -35,6 +35,7 @@ class ModelType(StrEnum):
class ModelFormat(StrEnum):
ARMNN = "armnn"
ONNX = "onnx"
RKNN = "rknn"
class ModelSource(StrEnum):

View File

@ -88,7 +88,7 @@ class OrtSession:
match provider:
case "CPUExecutionProvider":
options = {"arena_extend_strategy": "kSameAsRequested"}
case "CUDAExecutionProvider":
case "CUDAExecutionProvider" | "ROCMExecutionProvider":
options = {"arena_extend_strategy": "kSameAsRequested", "device_id": settings.device_id}
case "OpenVINOExecutionProvider":
options = {

View File

@ -0,0 +1,76 @@
from __future__ import annotations
from pathlib import Path
from typing import Any, NamedTuple
import numpy as np
from numpy.typing import NDArray
from app.config import log, settings
from app.schemas import SessionNode
from .rknnpool import RknnPoolExecutor, is_available, soc_name
is_available = is_available and settings.rknn
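# With RKNN available, converted models are fetched per SoC and resolve to
# <cache_dir>/<model_type>/rknpu/<soc_name>/model.rknn (see InferenceModel.model_path_for_format)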
model_prefix = Path("rknpu") / soc_name if is_available and soc_name is not None else None
def run_inference(rknn_lite: Any, input: list[NDArray[np.float32]]) -> list[NDArray[np.float32]]:
outputs: list[NDArray[np.float32]] = rknn_lite.inference(inputs=input, data_format="nchw")
return outputs
input_output_mapping: dict[str, dict[str, Any]] = {
"detection": {
"input": {"norm_tensor:0": (1, 3, 640, 640)},
"output": {
"norm_tensor:1": (12800, 1),
"norm_tensor:2": (3200, 1),
"norm_tensor:3": (800, 1),
"norm_tensor:4": (12800, 4),
"norm_tensor:5": (3200, 4),
"norm_tensor:6": (800, 4),
"norm_tensor:7": (12800, 10),
"norm_tensor:8": (3200, 10),
"norm_tensor:9": (800, 10),
},
},
"recognition": {"input": {"norm_tensor:0": (1, 3, 112, 112)}, "output": {"norm_tensor:1": (1, 512)}},
}
class RknnSession:
def __init__(self, model_path: Path) -> None:
self.model_type = "detection" if "detection" in model_path.parts else "recognition"
self.tpe = settings.rknn_threads
log.info(f"Loading RKNN model from {model_path} with {self.tpe} threads.")
self.rknnpool = RknnPoolExecutor(model_path=model_path.as_posix(), tpes=self.tpe, func=run_inference)
log.info(f"Loaded RKNN model from {model_path} with {self.tpe} threads.")
def get_inputs(self) -> list[SessionNode]:
return [RknnNode(name=k, shape=v) for k, v in input_output_mapping[self.model_type]["input"].items()]
def get_outputs(self) -> list[SessionNode]:
return [RknnNode(name=k, shape=v) for k, v in input_output_mapping[self.model_type]["output"].items()]
def run(
self,
output_names: list[str] | None,
input_feed: dict[str, NDArray[np.float32]] | dict[str, NDArray[np.int32]],
run_options: Any = None,
) -> list[NDArray[np.float32]]:
input_data: list[NDArray[np.float32]] = [np.ascontiguousarray(v) for v in input_feed.values()]
self.rknnpool.put(input_data)
res = self.rknnpool.get()
if res is None:
raise RuntimeError("RKNN inference failed!")
return res
class RknnNode(NamedTuple):
name: str | None
shape: tuple[int, ...]
__all__ = ["RknnSession", "RknnNode", "is_available", "soc_name", "model_prefix"]

View File

@ -0,0 +1,91 @@
# This code is from leafqycc/rknn-multi-threaded
# Following Apache License 2.0
import logging
from concurrent.futures import Future, ThreadPoolExecutor
from pathlib import Path
from queue import Queue
from typing import Callable
import numpy as np
from numpy.typing import NDArray
from app.config import log
from app.models.constants import RKNN_COREMASK_SUPPORTED_SOCS, RKNN_SUPPORTED_SOCS
def get_soc(device_tree_path: Path | str) -> str | None:
try:
with Path(device_tree_path).open() as f:
device_compatible_str = f.read()
for soc in RKNN_SUPPORTED_SOCS:
if soc in device_compatible_str:
return soc
log.warning("Device is not supported for RKNN")
except OSError as e:
log.warning(f"Could not read {device_tree_path}. Reason: %s", e)
return None
soc_name = None
is_available = False
try:
from rknnlite.api import RKNNLite
soc_name = get_soc("/proc/device-tree/compatible")
is_available = soc_name is not None
except ImportError:
log.debug("RKNN is not available")
def init_rknn(model_path: str) -> "RKNNLite":
if not is_available:
raise RuntimeError("rknn is not available!")
rknn_lite = RKNNLite()
rknn_lite.rknn_log.logger.setLevel(logging.ERROR)
ret = rknn_lite.load_rknn(model_path)
if ret != 0:
raise RuntimeError("Failed to load RKNN model")
if soc_name in RKNN_COREMASK_SUPPORTED_SOCS:
ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_AUTO)
else:
ret = rknn_lite.init_runtime() # Please do not set this parameter on other platforms.
if ret != 0:
raise RuntimeError("Failed to inititalize RKNN runtime environment")
return rknn_lite
class RknnPoolExecutor:
def __init__(
self,
model_path: str,
tpes: int,
func: Callable[["RKNNLite", list[NDArray[np.float32]]], list[NDArray[np.float32]]],
) -> None:
self.tpes = tpes
self.queue: Queue[Future[list[NDArray[np.float32]]]] = Queue()
self.rknn_pool = [init_rknn(model_path) for _ in range(tpes)]
self.pool = ThreadPoolExecutor(max_workers=tpes)
self.func = func
self.num = 0
def put(self, inputs: list[NDArray[np.float32]]) -> None:
self.queue.put(self.pool.submit(self.func, self.rknn_pool[self.num % self.tpes], inputs))
self.num += 1
def get(self) -> list[NDArray[np.float32]] | None:
if self.queue.empty():
return None
fut = self.queue.get()
return fut.result()
def release(self) -> None:
self.pool.shutdown()
for rknn_lite in self.rknn_pool:
rknn_lite.release()
def __del__(self) -> None:
self.release()
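The pool is a plain round-robin dispatcher: put() submits work against rknn_pool[num % tpes], fanning consecutive requests out across the runtime instances, and the FIFO queue means get() yields results in submission order. A minimal sketch under those assumptions (hypothetical model path and inputs):
import numpy as np
from app.sessions.rknn import run_inference
from app.sessions.rknnpool import RknnPoolExecutor
pool = RknnPoolExecutor(model_path="/models/detection/model.rknn", tpes=2, func=run_inference)
batches = [[np.random.rand(1, 3, 640, 640).astype(np.float32)] for _ in range(4)]
for batch in batches:
    pool.put(batch)                      # batch i runs on runtime instance i % 2
results = [pool.get() for _ in batches]  # returned in the order submitted
pool.release()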

View File

@ -25,6 +25,7 @@ from app.models.facial_recognition.detection import FaceDetector
from app.models.facial_recognition.recognition import FaceRecognizer
from app.sessions.ann import AnnSession
from app.sessions.ort import OrtSession
from app.sessions.rknn import RknnSession, run_inference
from .config import Settings, settings
from .models.base import InferenceModel
@ -69,6 +70,14 @@ class TestBase:
assert encoder.model_format == ModelFormat.ARMNN
def test_sets_default_model_format_to_rknn_if_available(self, mocker: MockerFixture) -> None:
mocker.patch.object(settings, "rknn", True)
mocker.patch("app.sessions.rknn.is_available", True)
encoder = OpenClipTextualEncoder("ViT-B-32__openai")
assert encoder.model_format == ModelFormat.RKNN
def test_casts_cache_dir_string_to_path(self) -> None:
cache_dir = "/test_cache"
encoder = OpenClipTextualEncoder("ViT-B-32__openai", cache_dir=cache_dir)
@ -125,7 +134,7 @@ class TestBase:
"immich-app/ViT-B-32__openai",
cache_dir=encoder.cache_dir,
local_dir=encoder.cache_dir,
ignore_patterns=["*.armnn"],
ignore_patterns=["*.armnn", "*.rknn"],
)
def test_download_downloads_armnn_if_preferred_format(self, snapshot_download: mock.Mock) -> None:
@ -136,7 +145,18 @@ class TestBase:
"immich-app/ViT-B-32__openai",
cache_dir=encoder.cache_dir,
local_dir=encoder.cache_dir,
ignore_patterns=[],
ignore_patterns=["*.rknn"],
)
def test_download_downloads_rknn_if_preferred_format(self, snapshot_download: mock.Mock) -> None:
encoder = OpenClipTextualEncoder("ViT-B-32__openai", model_format=ModelFormat.RKNN)
encoder.download()
snapshot_download.assert_called_once_with(
"immich-app/ViT-B-32__openai",
cache_dir=encoder.cache_dir,
local_dir=encoder.cache_dir,
ignore_patterns=["*.armnn"],
)
def test_throws_exception_if_model_path_does_not_exist(
@ -160,6 +180,7 @@ class TestOrtSession:
OV_EP = ["OpenVINOExecutionProvider", "CPUExecutionProvider"]
CUDA_EP_OUT_OF_ORDER = ["CPUExecutionProvider", "CUDAExecutionProvider"]
TRT_EP = ["TensorrtExecutionProvider", "CUDAExecutionProvider", "CPUExecutionProvider"]
ROCM_EP = ["ROCMExecutionProvider", "CPUExecutionProvider"]
@pytest.mark.providers(CPU_EP)
def test_sets_cpu_provider(self, providers: list[str]) -> None:
@ -199,6 +220,12 @@ class TestOrtSession:
assert session.providers == self.CUDA_EP
@pytest.mark.providers(ROCM_EP)
def test_uses_rocm(self, providers: list[str]) -> None:
session = OrtSession("ViT-B-32__openai")
assert session.providers == self.ROCM_EP
def test_sets_provider_kwarg(self) -> None:
providers = ["CUDAExecutionProvider"]
session = OrtSession("ViT-B-32__openai", providers=providers)
@ -215,19 +242,33 @@ class TestOrtSession:
{"arena_extend_strategy": "kSameAsRequested"},
]
def test_sets_device_id_for_openvino(self) -> None:
def test_sets_provider_options_for_openvino(self) -> None:
model_path = "/cache/ViT-B-32__openai/textual/model.onnx"
os.environ["MACHINE_LEARNING_DEVICE_ID"] = "1"
session = OrtSession("ViT-B-32__openai", providers=["OpenVINOExecutionProvider"])
session = OrtSession(model_path, providers=["OpenVINOExecutionProvider"])
assert session.provider_options[0]["device_type"] == "GPU.1"
assert session.provider_options == [
{
"device_type": "GPU.1",
"precision": "FP32",
"cache_dir": "/cache/ViT-B-32__openai/textual/openvino",
}
]
def test_sets_device_id_for_cuda(self) -> None:
def test_sets_provider_options_for_cuda(self) -> None:
os.environ["MACHINE_LEARNING_DEVICE_ID"] = "1"
session = OrtSession("ViT-B-32__openai", providers=["CUDAExecutionProvider"])
assert session.provider_options[0]["device_id"] == "1"
assert session.provider_options == [{"arena_extend_strategy": "kSameAsRequested", "device_id": "1"}]
def test_sets_provider_options_for_rocm(self) -> None:
os.environ["MACHINE_LEARNING_DEVICE_ID"] = "1"
session = OrtSession("ViT-B-32__openai", providers=["ROCMExecutionProvider"])
assert session.provider_options == [{"arena_extend_strategy": "kSameAsRequested", "device_id": "1"}]
def test_sets_provider_options_kwarg(self) -> None:
session = OrtSession(
@ -328,6 +369,33 @@ class TestAnnSession:
np_spy.assert_has_calls([mock.call(input1), mock.call(input2)])
class TestRknnSession:
def test_creates_rknn_session(self, rknn_session: mock.Mock, info: mock.Mock, mocker: MockerFixture) -> None:
model_path = mock.MagicMock(spec=Path)
tpe = 1
mocker.patch("app.sessions.rknn.soc_name", "rk3566")
mocker.patch("app.sessions.rknn.is_available", True)
RknnSession(model_path)
rknn_session.assert_called_once_with(model_path=model_path.as_posix(), tpes=tpe, func=run_inference)
info.assert_has_calls([mock.call(f"Loaded RKNN model from {model_path} with {tpe} threads.")])
def test_run_rknn(self, rknn_session: mock.Mock, mocker: MockerFixture) -> None:
rknn_session.return_value.load.return_value = 123
np_spy = mocker.spy(np, "ascontiguousarray")
mocker.patch("app.sessions.rknn.soc_name", "rk3566")
session = RknnSession(Path("ViT-B-32__openai"))
[input1, input2] = [np.random.rand(1, 3, 224, 224).astype(np.float32) for _ in range(2)]
input_feed = {"input.1": input1, "input.2": input2}
session.run(None, input_feed)
rknn_session.return_value.put.assert_called_once_with([input1, input2])
assert np_spy.call_count == 2
np_spy.assert_has_calls([mock.call(input1), mock.call(input2)])
class TestCLIP:
embedding = np.random.rand(512).astype(np.float32)
cache_dir = Path("test_cache")
@ -829,9 +897,7 @@ class TestLoad:
mock_model.clear_cache.assert_not_called()
mock_model.load.assert_not_called()
async def test_falls_back_to_onnx_if_other_format_does_not_exist(
self, exception: mock.Mock, warning: mock.Mock
) -> None:
async def test_falls_back_to_onnx_if_other_format_does_not_exist(self, warning: mock.Mock) -> None:
mock_model = mock.Mock(spec=InferenceModel)
mock_model.model_name = "test_model_name"
mock_model.model_type = ModelType.VISUAL
@ -846,8 +912,9 @@ class TestLoad:
mock_model.clear_cache.assert_not_called()
assert mock_model.load.call_count == 2
exception.assert_called_once_with(error)
warning.assert_called_once_with("ARMNN is available, but model 'test_model_name' does not support it.")
warning.assert_called_once_with(
"ARMNN is available, but model 'test_model_name' does not support it.", exc_info=error
)
mock_model.model_format = ModelFormat.ONNX

View File

@ -0,0 +1 @@
3.12

View File

@ -1,20 +0,0 @@
FROM mambaorg/micromamba:bookworm-slim@sha256:e3797091302382ea841498bc93a7b0a50f7c1448333d5e946d2d1608d0c5f43d AS builder
ENV TRANSFORMERS_CACHE=/cache \
PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
PATH="/opt/venv/bin:$PATH" \
PYTHONPATH=/usr/src
COPY --chown=$MAMBA_USER:$MAMBA_USER conda-lock.yml /tmp/conda-lock.yml
RUN micromamba install -y -n base -f /tmp/conda-lock.yml && \
micromamba remove -y -n base cxx-compiler && \
micromamba clean --all --yes
WORKDIR /usr/src/app
COPY --chown=$MAMBA_USER:$MAMBA_USER start.sh .
COPY --chown=$MAMBA_USER:$MAMBA_USER app .
ENTRYPOINT ["/usr/local/bin/_entrypoint.sh"]
CMD ["./start.sh"]

File diff suppressed because it is too large

View File

@ -1,15 +0,0 @@
name: base
channels:
- conda-forge
platforms:
- linux-64
- linux-aarch64
dependencies:
- black
- conda-lock
- mypy
- pytest
- pytest-cov
- pytest-mock
- ruff
category: dev

View File

@ -1,25 +0,0 @@
name: base
channels:
- conda-forge
- nvidia
- pytorch
platforms:
- linux-64
dependencies:
- cxx-compiler
- onnx==1.*
- onnxruntime==1.*
- open-clip-torch==2.*
- orjson==3.*
- pip
- python==3.11.*
- pytorch>=2.3
- rich==13.*
- safetensors==0.*
- setuptools==68.*
- torchvision
- transformers==4.*
- pip:
- multilingual-clip
- onnxsim
category: main

View File

@ -0,0 +1,98 @@
import shutil
from pathlib import Path
import typer
from tenacity import retry, stop_after_attempt, wait_fixed
from typing_extensions import Annotated
from .exporters.constants import DELETE_PATTERNS, SOURCE_TO_METADATA, ModelSource
from .exporters.onnx import export as onnx_export
from .exporters.rknn import export as rknn_export
app = typer.Typer(pretty_exceptions_show_locals=False)
def generate_readme(model_name: str, model_source: ModelSource) -> str:
(name, link, type) = SOURCE_TO_METADATA[model_source]
match model_source:
case ModelSource.MCLIP:
tags = ["immich", "clip", "multilingual"]
case ModelSource.OPENCLIP:
tags = ["immich", "clip"]
lowered = model_name.lower()
if "xlm" in lowered or "nllb" in lowered:
tags.append("multilingual")
case ModelSource.INSIGHTFACE:
tags = ["immich", "facial-recognition"]
case _:
raise ValueError(f"Unsupported model source {model_source}")
return f"""---
tags:
{" - " + "\n - ".join(tags)}
---
# Model Description
This repo contains ONNX exports for the associated {type} model by {name}. See the [{name}]({link}) repo for more info.
This repo is specifically intended for use with [Immich](https://immich.app/), a self-hosted photo library.
"""
@app.command()
def main(
model_name: str,
model_source: ModelSource,
output_dir: Path = Path("./models"),
no_cache: bool = False,
hf_organization: str = "immich-app",
hf_auth_token: Annotated[str | None, typer.Option(envvar="HF_AUTH_TOKEN")] = None,
) -> None:
hf_model_name = model_name.split("/")[-1]
hf_model_name = hf_model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
hf_model_name = hf_model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
output_dir = output_dir / hf_model_name
match model_source:
case ModelSource.MCLIP | ModelSource.OPENCLIP:
output_dir.mkdir(parents=True, exist_ok=True)
onnx_export(model_name, model_source, output_dir, no_cache=no_cache)
case ModelSource.INSIGHTFACE:
from huggingface_hub import snapshot_download
# TODO: start from insightface dump instead of downloading from HF
snapshot_download(f"immich-app/{hf_model_name}", local_dir=output_dir)
case _:
raise ValueError(f"Unsupported model source {model_source}")
try:
rknn_export(output_dir, no_cache=no_cache)
except Exception as e:
print(f"Failed to export model {model_name} to rknn: {e}")
(output_dir / "rknpu").unlink(missing_ok=True)
readme_path = output_dir / "README.md"
if no_cache or not readme_path.exists():
with open(readme_path, "w") as f:
f.write(generate_readme(model_name, model_source))
if hf_auth_token is not None:
from huggingface_hub import create_repo, upload_folder
repo_id = f"{hf_organization}/{hf_model_name}"
@retry(stop=stop_after_attempt(5), wait=wait_fixed(5))
def upload_model() -> None:
create_repo(repo_id, exist_ok=True, token=hf_auth_token)
upload_folder(
repo_id=repo_id,
folder_path=output_dir,
# remote repo files to be deleted before uploading
# deletion is in the same commit as the upload, so it's atomic
delete_patterns=DELETE_PATTERNS,
token=hf_auth_token,
)
upload_model()
if __name__ == "__main__":
typer.run(main)

View File

@ -0,0 +1,42 @@
from enum import StrEnum
from typing import NamedTuple
class ModelSource(StrEnum):
INSIGHTFACE = "insightface"
MCLIP = "mclip"
OPENCLIP = "openclip"
class SourceMetadata(NamedTuple):
name: str
link: str
type: str
SOURCE_TO_METADATA = {
ModelSource.MCLIP: SourceMetadata("M-CLIP", "https://huggingface.co/M-CLIP", "CLIP"),
ModelSource.OPENCLIP: SourceMetadata("OpenCLIP", "https://github.com/mlfoundations/open_clip", "CLIP"),
ModelSource.INSIGHTFACE: SourceMetadata(
"InsightFace", "https://github.com/deepinsight/insightface/tree/master", "facial recognition"
),
}
RKNN_SOCS = ["rk3566", "rk3568", "rk3576", "rk3588"]
# glob to delete old UUID blobs when reuploading models
_uuid_char = "[a-fA-F0-9]"
_uuid_glob = _uuid_char * 8 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 12
DELETE_PATTERNS = [
"**/*onnx*",
"**/Constant*",
"**/*.weight",
"**/*.bias",
"**/*.proj",
"**/*in_proj_bias",
"**/*.npy",
"**/*.latent",
"**/*.pos_embed",
f"**/{_uuid_glob}",
]
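For illustration, _uuid_glob expands to an 8-4-4-4-12 run of hex character classes, i.e. the bare blob names that older uploads left behind. Assuming fnmatch semantics approximate the Hub's pattern matching, a quick check in this module's namespace:
import fnmatch
assert fnmatch.fnmatch("1b4d0d3a-9f1e-4c2e-8a7b-2f3c4d5e6f70", _uuid_glob)  # stray UUID blob
assert not fnmatch.fnmatch("model.onnx", _uuid_glob)                        # real artifacts survive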

View File

@ -0,0 +1,20 @@
from pathlib import Path
from ..constants import ModelSource
from .models import mclip, openclip
def export(
model_name: str, model_source: ModelSource, output_dir: Path, opset_version: int = 19, no_cache: bool = False
) -> None:
visual_dir = output_dir / "visual"
textual_dir = output_dir / "textual"
match model_source:
case ModelSource.MCLIP:
mclip.to_onnx(model_name, opset_version, visual_dir, textual_dir, no_cache=no_cache)
case ModelSource.OPENCLIP:
name, _, pretrained = model_name.partition("__")
config = openclip.OpenCLIPModelConfig(name, pretrained)
openclip.to_onnx(config, opset_version, visual_dir, textual_dir, no_cache=no_cache)
case _:
raise ValueError(f"Unsupported model source {model_source}")

View File

@ -1,11 +1,6 @@
import os
import tempfile
import warnings
from pathlib import Path
import torch
from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
from transformers import AutoTokenizer
from typing import Any
from .openclip import OpenCLIPModelConfig
from .openclip import to_onnx as openclip_to_onnx
@ -21,25 +16,40 @@ _MCLIP_TO_OPENCLIP = {
def to_onnx(
model_name: str,
opset_version: int,
output_dir_visual: Path | str,
output_dir_textual: Path | str,
no_cache: bool = False,
) -> tuple[Path, Path]:
textual_path = get_model_path(output_dir_textual)
with tempfile.TemporaryDirectory() as tmpdir:
model = MultilingualCLIP.from_pretrained(model_name, cache_dir=os.environ.get("CACHE_DIR", tmpdir))
if no_cache or not textual_path.exists():
import torch
from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
from transformers import AutoTokenizer
torch.backends.mha.set_fastpath_enabled(False)
model = MultilingualCLIP.from_pretrained(model_name)
AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)
model.eval()
for param in model.parameters():
param.requires_grad_(False)
export_text_encoder(model, textual_path)
visual_path, _ = openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
_export_text_encoder(model, textual_path, opset_version)
else:
print(f"Model {textual_path} already exists, skipping")
visual_path, _ = openclip_to_onnx(
_MCLIP_TO_OPENCLIP[model_name], opset_version, output_dir_visual, no_cache=no_cache
)
assert visual_path is not None, "Visual model export failed"
return visual_path, textual_path
def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None:
def _export_text_encoder(model: Any, output_path: Path | str, opset_version: int) -> None:
import torch
from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
output_path = Path(output_path)
def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
@ -61,7 +71,7 @@ def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None
output_path.as_posix(),
input_names=["input_ids", "attention_mask"],
output_names=["embedding"],
opset_version=17,
opset_version=opset_version,
# dynamic_axes={
# "input_ids": {0: "batch_size", 1: "sequence_length"},
# "attention_mask": {0: "batch_size", 1: "sequence_length"},

View File

@ -0,0 +1,153 @@
import warnings
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Any
from .util import get_model_path, save_config
@dataclass
class OpenCLIPModelConfig:
name: str
pretrained: str
@cached_property
def model_config(self) -> dict[str, Any]:
import open_clip
config: dict[str, Any] | None = open_clip.get_model_config(self.name)
if config is None:
raise ValueError(f"Unknown model {self.name}")
return config
@property
def image_size(self) -> int:
image_size: int = self.model_config["vision_cfg"]["image_size"]
return image_size
@property
def sequence_length(self) -> int:
context_length: int = self.model_config["text_cfg"].get("context_length", 77)
return context_length
def to_onnx(
model_cfg: OpenCLIPModelConfig,
opset_version: int,
output_dir_visual: Path | str | None = None,
output_dir_textual: Path | str | None = None,
no_cache: bool = False,
) -> tuple[Path | None, Path | None]:
visual_path = None
textual_path = None
if output_dir_visual is not None:
output_dir_visual = Path(output_dir_visual)
visual_path = get_model_path(output_dir_visual)
if output_dir_textual is not None:
output_dir_textual = Path(output_dir_textual)
textual_path = get_model_path(output_dir_textual)
if not no_cache and (
(textual_path is None or textual_path.exists()) and (visual_path is None or visual_path.exists())
):
print(f"Models {textual_path} and {visual_path} already exist, skipping")
return visual_path, textual_path
import open_clip
import torch
from transformers import AutoTokenizer
torch.backends.mha.set_fastpath_enabled(False)
model = open_clip.create_model(
model_cfg.name,
pretrained=model_cfg.pretrained,
jit=False,
require_pretrained=True,
)
text_vision_cfg = open_clip.get_model_config(model_cfg.name)
model.eval()
for param in model.parameters():
param.requires_grad_(False)
if visual_path is not None and output_dir_visual is not None:
if no_cache or not visual_path.exists():
save_config(
open_clip.get_model_preprocess_cfg(model),
output_dir_visual / "preprocess_cfg.json",
)
save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
_export_image_encoder(model, model_cfg, visual_path, opset_version)
else:
print(f"Model {visual_path} already exists, skipping")
if textual_path is not None and output_dir_textual is not None:
if no_cache or not textual_path.exists():
tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
_export_text_encoder(model, model_cfg, textual_path, opset_version)
else:
print(f"Model {textual_path} already exists, skipping")
return visual_path, textual_path
def _export_image_encoder(
model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
import torch
output_path = Path(output_path)
def encode_image(image: torch.Tensor) -> torch.Tensor:
output = model.encode_image(image, normalize=True)
assert isinstance(output, torch.Tensor)
return output
model.forward = encode_image
args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
torch.onnx.export(
model,
args,
output_path.as_posix(),
input_names=["image"],
output_names=["embedding"],
opset_version=opset_version,
# dynamic_axes={"image": {0: "batch_size"}},
)
def _export_text_encoder(
model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
import torch
output_path = Path(output_path)
def encode_text(text: torch.Tensor) -> torch.Tensor:
output = model.encode_text(text, normalize=True)
assert isinstance(output, torch.Tensor)
return output
model.forward = encode_text
args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
torch.onnx.export(
model,
args,
output_path.as_posix(),
input_names=["text"],
output_names=["embedding"],
opset_version=opset_version,
# dynamic_axes={"text": {0: "batch_size"}},
)
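A small usage sketch for the config class: cached_property defers the open_clip import until model_config is first read, so constructing the dataclass stays cheap. The printed values assume the stock ViT-B-32 definition:
cfg = OpenCLIPModelConfig("ViT-B-32", "openai")
print(cfg.image_size)       # 224 for ViT-B-32
print(cfg.sequence_length)  # 77, the default CLIP context length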

View File

@ -0,0 +1,96 @@
from pathlib import Path
from .constants import RKNN_SOCS
def _export_platform(
model_dir: Path,
target_platform: str,
inputs: list[str] | None = None,
input_size_list: list[list[int]] | None = None,
fuse_matmul_softmax_matmul_to_sdpa: bool = True,
no_cache: bool = False,
) -> None:
from rknn.api import RKNN
input_path = model_dir / "model.onnx"
output_path = model_dir / "rknpu" / target_platform / "model.rknn"
if not no_cache and output_path.exists():
print(f"Model {input_path} already exists at {output_path}, skipping")
return
print(f"Exporting model {input_path} to {output_path}")
rknn = RKNN(verbose=False)
rknn.config(
target_platform=target_platform,
disable_rules=["fuse_matmul_softmax_matmul_to_sdpa"] if not fuse_matmul_softmax_matmul_to_sdpa else [],
enable_flash_attention=False,
model_pruning=True,
)
ret = rknn.load_onnx(model=input_path.as_posix(), inputs=inputs, input_size_list=input_size_list)
if ret != 0:
raise RuntimeError("Load failed!")
ret = rknn.build(do_quantization=False)
if ret != 0:
raise RuntimeError("Build failed!")
output_path.parent.mkdir(parents=True, exist_ok=True)
ret = rknn.export_rknn(output_path.as_posix())
if ret != 0:
raise RuntimeError("Export rknn model failed!")
def _export_platforms(
model_dir: Path,
inputs: list[str] | None = None,
input_size_list: list[list[int]] | None = None,
no_cache: bool = False,
) -> None:
fuse_matmul_softmax_matmul_to_sdpa = True
for soc in RKNN_SOCS:
try:
_export_platform(
model_dir,
soc,
inputs=inputs,
input_size_list=input_size_list,
fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
no_cache=no_cache,
)
except Exception as e:
print(f"Failed to export model for {soc}: {e}")
if "inputs or 'outputs' must be set" in str(e):
print("Retrying without fuse_matmul_softmax_matmul_to_sdpa")
fuse_matmul_softmax_matmul_to_sdpa = False
_export_platform(
model_dir,
soc,
inputs=inputs,
input_size_list=input_size_list,
fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
no_cache=no_cache,
)
def export(model_dir: Path, no_cache: bool = False) -> None:
textual = model_dir / "textual"
visual = model_dir / "visual"
detection = model_dir / "detection"
recognition = model_dir / "recognition"
if textual.is_dir():
_export_platforms(textual, no_cache=no_cache)
if visual.is_dir():
_export_platforms(visual, no_cache=no_cache)
if detection.is_dir():
_export_platforms(detection, inputs=["input.1"], input_size_list=[[1, 3, 640, 640]], no_cache=no_cache)
if recognition.is_dir():
_export_platforms(recognition, inputs=["input.1"], input_size_list=[[1, 3, 112, 112]], no_cache=no_cache)
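A hypothetical end-to-end call, assuming the ONNX exports already exist under the model directory; for a CLIP model with visual/ and textual/ subdirectories this writes visual/rknpu/<soc>/model.rknn and textual/rknpu/<soc>/model.rknn for every SoC in RKNN_SOCS:
from pathlib import Path
from immich_model_exporter.exporters.rknn import export as rknn_export
rknn_export(Path("models/ViT-B-32__openai"))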

View File

@ -0,0 +1,88 @@
import subprocess
from exporters.constants import ModelSource
mclip = [
"M-CLIP/LABSE-Vit-L-14",
"M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
"M-CLIP/XLM-Roberta-Large-Vit-B-32",
"M-CLIP/XLM-Roberta-Large-Vit-L-14",
]
openclip = [
"RN101__openai",
"RN101__yfcc15m",
"RN50__cc12m",
"RN50__openai",
"RN50__yfcc15m",
"RN50x16__openai",
"RN50x4__openai",
"RN50x64__openai",
"ViT-B-16-SigLIP-256__webli",
"ViT-B-16-SigLIP-384__webli",
"ViT-B-16-SigLIP-512__webli",
"ViT-B-16-SigLIP-i18n-256__webli",
"ViT-B-16-SigLIP2__webli",
"ViT-B-16-SigLIP__webli",
"ViT-B-16-plus-240__laion400m_e31",
"ViT-B-16-plus-240__laion400m_e32",
"ViT-B-16__laion400m_e31",
"ViT-B-16__laion400m_e32",
"ViT-B-16__openai",
"ViT-B-32-SigLIP2-256__webli",
"ViT-B-32__laion2b-s34b-b79k",
"ViT-B-32__laion2b_e16",
"ViT-B-32__laion400m_e31",
"ViT-B-32__laion400m_e32",
"ViT-B-32__openai",
"ViT-H-14-378-quickgelu__dfn5b",
"ViT-H-14-quickgelu__dfn5b",
"ViT-H-14__laion2b-s32b-b79k",
"ViT-L-14-336__openai",
"ViT-L-14-quickgelu__dfn2b",
"ViT-L-14__laion2b-s32b-b82k",
"ViT-L-14__laion400m_e31",
"ViT-L-14__laion400m_e32",
"ViT-L-14__openai",
"ViT-L-16-SigLIP-256__webli",
"ViT-L-16-SigLIP-384__webli",
"ViT-L-16-SigLIP2-256__webli",
"ViT-L-16-SigLIP2-384__webli",
"ViT-L-16-SigLIP2-512__webli",
"ViT-SO400M-14-SigLIP-384__webli",
"ViT-SO400M-14-SigLIP2-378__webli",
"ViT-SO400M-14-SigLIP2__webli",
"ViT-SO400M-16-SigLIP2-256__webli",
"ViT-SO400M-16-SigLIP2-384__webli",
"ViT-SO400M-16-SigLIP2-512__webli",
"ViT-gopt-16-SigLIP2-256__webli",
"ViT-gopt-16-SigLIP2-384__webli",
"nllb-clip-base-siglip__mrl",
"nllb-clip-base-siglip__v1",
"nllb-clip-large-siglip__mrl",
"nllb-clip-large-siglip__v1",
"xlm-roberta-base-ViT-B-32__laion5b_s13b_b90k",
"xlm-roberta-large-ViT-H-14__frozen_laion5b_s13b_b90k",
]
insightface = [
"antelopev2",
"buffalo_l",
"buffalo_m",
"buffalo_s",
]
def export_models(models: list[str], source: ModelSource) -> None:
for model in models:
try:
print(f"Exporting model {model}")
subprocess.check_call(["python", "-m", "immich_model_exporter.export", model, source])
except Exception as e:
print(f"Failed to export model {model}: {e}")
if __name__ == "__main__":
export_models(mclip, ModelSource.MCLIP)
export_models(openclip, ModelSource.OPENCLIP)
export_models(insightface, ModelSource.INSIGHTFACE)

View File

@ -1,114 +0,0 @@
import os
import tempfile
import warnings
from dataclasses import dataclass, field
from pathlib import Path
import open_clip
import torch
from transformers import AutoTokenizer
from .util import get_model_path, save_config
@dataclass
class OpenCLIPModelConfig:
name: str
pretrained: str
image_size: int = field(init=False)
sequence_length: int = field(init=False)
def __post_init__(self) -> None:
open_clip_cfg = open_clip.get_model_config(self.name)
if open_clip_cfg is None:
raise ValueError(f"Unknown model {self.name}")
self.image_size = open_clip_cfg["vision_cfg"]["image_size"]
self.sequence_length = open_clip_cfg["text_cfg"].get("context_length", 77)
def to_onnx(
model_cfg: OpenCLIPModelConfig,
output_dir_visual: Path | str | None = None,
output_dir_textual: Path | str | None = None,
) -> tuple[Path | None, Path | None]:
visual_path = None
textual_path = None
with tempfile.TemporaryDirectory() as tmpdir:
model = open_clip.create_model(
model_cfg.name,
pretrained=model_cfg.pretrained,
jit=False,
cache_dir=os.environ.get("CACHE_DIR", tmpdir),
require_pretrained=True,
)
text_vision_cfg = open_clip.get_model_config(model_cfg.name)
model.eval()
for param in model.parameters():
param.requires_grad_(False)
if output_dir_visual is not None:
output_dir_visual = Path(output_dir_visual)
visual_path = get_model_path(output_dir_visual)
save_config(open_clip.get_model_preprocess_cfg(model), output_dir_visual / "preprocess_cfg.json")
save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
export_image_encoder(model, model_cfg, visual_path)
if output_dir_textual is not None:
output_dir_textual = Path(output_dir_textual)
textual_path = get_model_path(output_dir_textual)
tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
export_text_encoder(model, model_cfg, textual_path)
return visual_path, textual_path
def export_image_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
output_path = Path(output_path)
def encode_image(image: torch.Tensor) -> torch.Tensor:
output = model.encode_image(image, normalize=True)
assert isinstance(output, torch.Tensor)
return output
args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)
traced = torch.jit.trace(encode_image, args) # type: ignore[no-untyped-call]
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
torch.onnx.export(
traced,
args,
output_path.as_posix(),
input_names=["image"],
output_names=["embedding"],
opset_version=17,
# dynamic_axes={"image": {0: "batch_size"}},
)
def export_text_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
output_path = Path(output_path)
def encode_text(text: torch.Tensor) -> torch.Tensor:
output = model.encode_text(text, normalize=True)
assert isinstance(output, torch.Tensor)
return output
args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)
traced = torch.jit.trace(encode_text, args) # type: ignore[no-untyped-call]
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
torch.onnx.export(
traced,
args,
output_path.as_posix(),
input_names=["text"],
output_names=["embedding"],
opset_version=17,
# dynamic_axes={"text": {0: "batch_size"}},
)

View File

@ -1,49 +0,0 @@
from pathlib import Path
import onnx
import onnxruntime as ort
import onnxsim
def save_onnx(model: onnx.ModelProto, output_path: Path | str) -> None:
try:
onnx.save(model, output_path)
except ValueError as e:
if "The proto size is larger than the 2 GB limit." in str(e):
onnx.save(model, output_path, save_as_external_data=True, size_threshold=1_000_000)
else:
raise e
def optimize_onnxsim(model_path: Path | str, output_path: Path | str) -> None:
model_path = Path(model_path)
output_path = Path(output_path)
model = onnx.load(model_path.as_posix())
model, check = onnxsim.simplify(model)
assert check, "Simplified ONNX model could not be validated"
for file in model_path.parent.iterdir():
if file.name.startswith("Constant") or "onnx" in file.name or file.suffix == ".weight":
file.unlink()
save_onnx(model, output_path)
def optimize_ort(
model_path: Path | str,
output_path: Path | str,
level: ort.GraphOptimizationLevel = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC,
) -> None:
model_path = Path(model_path)
output_path = Path(output_path)
sess_options = ort.SessionOptions()
sess_options.graph_optimization_level = level
sess_options.optimized_model_filepath = output_path.as_posix()
ort.InferenceSession(model_path.as_posix(), providers=["CPUExecutionProvider"], sess_options=sess_options)
def optimize(model_path: Path | str) -> None:
model_path = Path(model_path)
optimize_ort(model_path, model_path)
optimize_onnxsim(model_path, model_path)

View File

@ -0,0 +1,67 @@
[project]
name = "immich_model_exporter"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.10, <4.0"
dependencies = [
"huggingface-hub>=0.29.3",
"multilingual-clip>=1.0.10",
"onnx>=1.14.1",
"onnxruntime>=1.16.0",
"open-clip-torch>=2.31.0",
"typer>=0.15.2",
"rknn-toolkit2>=2.3.0",
"transformers>=4.49.0",
"tenacity>=9.0.0",
]
[dependency-groups]
dev = ["black>=23.3.0", "mypy>=1.3.0", "ruff>=0.0.272"]
[tool.uv]
override-dependencies = [
"onnx>=1.16.0,<2",
"onnxruntime>=1.18.2,<2",
"torch>=2.4",
"torchvision>=0.21",
]
[tool.uv.sources]
torch = [{ index = "pytorch-cpu" }]
torchvision = [{ index = "pytorch-cpu" }]
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true
[tool.hatch.build.targets.sdist]
include = ["immich_model_exporter"]
[tool.hatch.build.targets.wheel]
include = ["immich_model_exporter"]
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.mypy]
python_version = "3.12"
follow_imports = "silent"
warn_redundant_casts = true
disallow_any_generics = true
check_untyped_defs = true
disallow_untyped_defs = true
ignore_missing_imports = true
[tool.ruff]
line-length = 120
target-version = "py312"
[tool.ruff.lint]
select = ["E", "F", "I"]
[tool.black]
line-length = 120
target-version = ['py312']

View File

@ -1,113 +0,0 @@
import gc
import os
from pathlib import Path
from tempfile import TemporaryDirectory
import torch
from huggingface_hub import create_repo, upload_folder
from models import mclip, openclip
from models.optimize import optimize
from rich.progress import Progress
models = [
"M-CLIP/LABSE-Vit-L-14",
"M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
"M-CLIP/XLM-Roberta-Large-Vit-B-32",
"M-CLIP/XLM-Roberta-Large-Vit-L-14",
"RN101::openai",
"RN101::yfcc15m",
"RN50::cc12m",
"RN50::openai",
"RN50::yfcc15m",
"RN50x16::openai",
"RN50x4::openai",
"RN50x64::openai",
"ViT-B-16-SigLIP-256::webli",
"ViT-B-16-SigLIP-384::webli",
"ViT-B-16-SigLIP-512::webli",
"ViT-B-16-SigLIP-i18n-256::webli",
"ViT-B-16-SigLIP::webli",
"ViT-B-16-plus-240::laion400m_e31",
"ViT-B-16-plus-240::laion400m_e32",
"ViT-B-16::laion400m_e31",
"ViT-B-16::laion400m_e32",
"ViT-B-16::openai",
"ViT-B-32::laion2b-s34b-b79k",
"ViT-B-32::laion2b_e16",
"ViT-B-32::laion400m_e31",
"ViT-B-32::laion400m_e32",
"ViT-B-32::openai",
"ViT-H-14-378-quickgelu::dfn5b",
"ViT-H-14-quickgelu::dfn5b",
"ViT-H-14::laion2b-s32b-b79k",
"ViT-L-14-336::openai",
"ViT-L-14-quickgelu::dfn2b",
"ViT-L-14::laion2b-s32b-b82k",
"ViT-L-14::laion400m_e31",
"ViT-L-14::laion400m_e32",
"ViT-L-14::openai",
"ViT-L-16-SigLIP-256::webli",
"ViT-L-16-SigLIP-384::webli",
"ViT-SO400M-14-SigLIP-384::webli",
"ViT-g-14::laion2b-s12b-b42k",
"nllb-clip-base-siglip::mrl",
"nllb-clip-base-siglip::v1",
"nllb-clip-large-siglip::mrl",
"nllb-clip-large-siglip::v1",
"xlm-roberta-base-ViT-B-32::laion5b_s13b_b90k",
"xlm-roberta-large-ViT-H-14::frozen_laion5b_s13b_b90k",
]
# glob to delete old UUID blobs when reuploading models
uuid_char = "[a-fA-F0-9]"
uuid_glob = uuid_char * 8 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 12
# remote repo files to be deleted before uploading
# deletion is in the same commit as the upload, so it's atomic
delete_patterns = ["**/*onnx*", "**/Constant*", "**/*.weight", "**/*.bias", f"**/{uuid_glob}"]
with Progress() as progress:
task = progress.add_task("[green]Exporting models...", total=len(models))
token = os.environ.get("HF_AUTH_TOKEN")
torch.backends.mha.set_fastpath_enabled(False)
with TemporaryDirectory() as tmp:
tmpdir = Path(tmp)
for model in models:
model_name = model.split("/")[-1].replace("::", "__")
hf_model_name = model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
hf_model_name = model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
config_path = tmpdir / model_name / "config.json"
def export() -> None:
progress.update(task, description=f"[green]Exporting {hf_model_name}")
visual_dir = tmpdir / hf_model_name / "visual"
textual_dir = tmpdir / hf_model_name / "textual"
if model.startswith("M-CLIP"):
visual_path, textual_path = mclip.to_onnx(model, visual_dir, textual_dir)
else:
name, _, pretrained = model_name.partition("__")
config = openclip.OpenCLIPModelConfig(name, pretrained)
visual_path, textual_path = openclip.to_onnx(config, visual_dir, textual_dir)
progress.update(task, description=f"[green]Optimizing {hf_model_name} (visual)")
optimize(visual_path)
progress.update(task, description=f"[green]Optimizing {hf_model_name} (textual)")
optimize(textual_path)
gc.collect()
def upload() -> None:
progress.update(task, description=f"[yellow]Uploading {hf_model_name}")
repo_id = f"immich-app/{hf_model_name}"
create_repo(repo_id, exist_ok=True)
upload_folder(
repo_id=repo_id,
folder_path=tmpdir / hf_model_name,
delete_patterns=delete_patterns,
token=token,
)
export()
if token is not None:
upload()
progress.update(task, advance=1)

machine-learning/export/uv.lock generated Normal file (1395 lines)

File diff suppressed because it is too large

View File

@ -0,0 +1,179 @@
commit 16839b58d9b3c3162a67ce5d776b36d4d24e801f
Author: mertalev <101130780+mertalev@users.noreply.github.com>
Date: Wed Mar 5 11:25:38 2025 -0500
disable algo caching (attributed to @dmnieto in https://github.com/microsoft/onnxruntime/pull/19567)
diff --git a/onnxruntime/core/providers/rocm/nn/conv.cc b/onnxruntime/core/providers/rocm/nn/conv.cc
index d7f47d07a8..4060a2af52 100644
--- a/onnxruntime/core/providers/rocm/nn/conv.cc
+++ b/onnxruntime/core/providers/rocm/nn/conv.cc
@@ -127,7 +127,6 @@ Status Conv<T, NHWC>::UpdateState(OpKernelContext* context, bool bias_expected)
if (w_dims_changed) {
s_.last_w_dims = gsl::make_span(w_dims);
- s_.cached_benchmark_fwd_results.clear();
}
ORT_RETURN_IF_ERROR(conv_attrs_.ValidateInputShape(X->Shape(), W->Shape(), channels_last, channels_last));
@@ -277,35 +276,6 @@ Status Conv<T, NHWC>::UpdateState(OpKernelContext* context, bool bias_expected)
HIP_CALL_THROW(hipMalloc(&s_.b_zero, malloc_size));
HIP_CALL_THROW(hipMemsetAsync(s_.b_zero, 0, malloc_size, Stream(context)));
}
-
- if (!s_.cached_benchmark_fwd_results.contains(x_dims_miopen)) {
- miopenConvAlgoPerf_t perf;
- int algo_count = 1;
- const ROCMExecutionProvider* rocm_ep = static_cast<const ROCMExecutionProvider*>(this->Info().GetExecutionProvider());
- static constexpr int num_algos = MIOPEN_CONVOLUTION_FWD_ALGO_COUNT;
- size_t max_ws_size = rocm_ep->GetMiopenConvUseMaxWorkspace() ? GetMaxWorkspaceSize(GetMiopenHandle(context), s_, kAllAlgos, num_algos, rocm_ep->GetDeviceId())
- : AlgoSearchWorkspaceSize;
- IAllocatorUniquePtr<void> algo_search_workspace = GetTransientScratchBuffer<void>(max_ws_size);
- MIOPEN_RETURN_IF_ERROR(miopenFindConvolutionForwardAlgorithm(
- GetMiopenHandle(context),
- s_.x_tensor,
- s_.x_data,
- s_.w_desc,
- s_.w_data,
- s_.conv_desc,
- s_.y_tensor,
- s_.y_data,
- 1, // requestedAlgoCount
- &algo_count, // returnedAlgoCount
- &perf,
- algo_search_workspace.get(),
- max_ws_size,
- false)); // Do not do exhaustive algo search.
- s_.cached_benchmark_fwd_results.insert(x_dims_miopen, {perf.fwd_algo, perf.memory});
- }
- const auto& perf = s_.cached_benchmark_fwd_results.at(x_dims_miopen);
- s_.fwd_algo = perf.fwd_algo;
- s_.workspace_bytes = perf.memory;
} else {
// set Y
s_.Y = context->Output(0, TensorShape(s_.y_dims));
@@ -319,6 +289,31 @@ Status Conv<T, NHWC>::UpdateState(OpKernelContext* context, bool bias_expected)
s_.y_data = reinterpret_cast<HipT*>(s_.Y->MutableData<T>());
}
}
+
+ miopenConvAlgoPerf_t perf;
+ int algo_count = 1;
+ const ROCMExecutionProvider* rocm_ep = static_cast<const ROCMExecutionProvider*>(this->Info().GetExecutionProvider());
+ static constexpr int num_algos = MIOPEN_CONVOLUTION_FWD_ALGO_COUNT;
+ size_t max_ws_size = rocm_ep->GetMiopenConvUseMaxWorkspace() ? GetMaxWorkspaceSize(GetMiopenHandle(context), s_, kAllAlgos, num_algos, rocm_ep->GetDeviceId())
+ : AlgoSearchWorkspaceSize;
+ IAllocatorUniquePtr<void> algo_search_workspace = GetTransientScratchBuffer<void>(max_ws_size);
+ MIOPEN_RETURN_IF_ERROR(miopenFindConvolutionForwardAlgorithm(
+ GetMiopenHandle(context),
+ s_.x_tensor,
+ s_.x_data,
+ s_.w_desc,
+ s_.w_data,
+ s_.conv_desc,
+ s_.y_tensor,
+ s_.y_data,
+ 1, // requestedAlgoCount
+ &algo_count, // returnedAlgoCount
+ &perf,
+ algo_search_workspace.get(),
+ max_ws_size,
+ false)); // Do not do exhaustive algo search.
+ s_.fwd_algo = perf.fwd_algo;
+ s_.workspace_bytes = perf.memory;
return Status::OK();
}
diff --git a/onnxruntime/core/providers/rocm/nn/conv.h b/onnxruntime/core/providers/rocm/nn/conv.h
index bc9846203e..d54218f258 100644
--- a/onnxruntime/core/providers/rocm/nn/conv.h
+++ b/onnxruntime/core/providers/rocm/nn/conv.h
@@ -108,9 +108,6 @@ class lru_unordered_map {
list_type lru_list_;
};
-// cached miopen descriptors
-constexpr size_t MAX_CACHED_ALGO_PERF_RESULTS = 10000;
-
template <typename AlgoPerfType>
struct MiopenConvState {
// if x/w dims changed, update algo and miopenTensors
@@ -148,9 +145,6 @@ struct MiopenConvState {
decltype(AlgoPerfType().memory) memory;
};
- lru_unordered_map<TensorShapeVector, PerfFwdResultParams, vector_hash> cached_benchmark_fwd_results{MAX_CACHED_ALGO_PERF_RESULTS};
- lru_unordered_map<TensorShapeVector, PerfBwdResultParams, vector_hash> cached_benchmark_bwd_results{MAX_CACHED_ALGO_PERF_RESULTS};
-
// Some properties needed to support asymmetric padded Conv nodes
bool post_slicing_required;
TensorShapeVector slice_starts;
diff --git a/onnxruntime/core/providers/rocm/nn/conv_transpose.cc b/onnxruntime/core/providers/rocm/nn/conv_transpose.cc
index 7447113fdf..a662e35b2e 100644
--- a/onnxruntime/core/providers/rocm/nn/conv_transpose.cc
+++ b/onnxruntime/core/providers/rocm/nn/conv_transpose.cc
@@ -76,7 +76,6 @@ Status ConvTranspose<T, NHWC>::DoConvTranspose(OpKernelContext* context, bool dy
if (w_dims_changed) {
s_.last_w_dims = gsl::make_span(w_dims);
- s_.cached_benchmark_bwd_results.clear();
}
ConvTransposeAttributes::Prepare p;
@@ -126,35 +125,29 @@ Status ConvTranspose<T, NHWC>::DoConvTranspose(OpKernelContext* context, bool dy
}
y_data = reinterpret_cast<HipT*>(p.Y->MutableData<T>());
-
- if (!s_.cached_benchmark_bwd_results.contains(x_dims)) {
- IAllocatorUniquePtr<void> algo_search_workspace = GetScratchBuffer<void>(AlgoSearchWorkspaceSize, context->GetComputeStream());
-
- miopenConvAlgoPerf_t perf;
- int algo_count = 1;
- MIOPEN_RETURN_IF_ERROR(miopenFindConvolutionBackwardDataAlgorithm(
- GetMiopenHandle(context),
- s_.x_tensor,
- x_data,
- s_.w_desc,
- w_data,
- s_.conv_desc,
- s_.y_tensor,
- y_data,
- 1,
- &algo_count,
- &perf,
- algo_search_workspace.get(),
- AlgoSearchWorkspaceSize,
- false));
- s_.cached_benchmark_bwd_results.insert(x_dims, {perf.bwd_data_algo, perf.memory});
- }
-
- const auto& perf = s_.cached_benchmark_bwd_results.at(x_dims);
- s_.bwd_data_algo = perf.bwd_data_algo;
- s_.workspace_bytes = perf.memory;
}
+ IAllocatorUniquePtr<void> algo_search_workspace = GetScratchBuffer<void>(AlgoSearchWorkspaceSize, context->GetComputeStream());
+ miopenConvAlgoPerf_t perf;
+ int algo_count = 1;
+ MIOPEN_RETURN_IF_ERROR(miopenFindConvolutionBackwardDataAlgorithm(
+ GetMiopenHandle(context),
+ s_.x_tensor,
+ x_data,
+ s_.w_desc,
+ w_data,
+ s_.conv_desc,
+ s_.y_tensor,
+ y_data,
+ 1,
+ &algo_count,
+ &perf,
+ algo_search_workspace.get(),
+ AlgoSearchWorkspaceSize,
+ false));
+ s_.bwd_data_algo = perf.bwd_data_algo;
+ s_.workspace_bytes = perf.memory;
+
// The following block will be executed in case there has been no change in the shapes of the
// input and the filter compared to the previous run
if (!y_data) {

View File

@ -0,0 +1,13 @@
diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index d90a2a355..bb1a7de12 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -295,7 +295,7 @@ if (onnxruntime_USE_ROCM)
endif()
if (NOT CMAKE_HIP_ARCHITECTURES)
- set(CMAKE_HIP_ARCHITECTURES "gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx940;gfx941;gfx942;gfx1200;gfx1201")
+ set(CMAKE_HIP_ARCHITECTURES "gfx900;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx1102;gfx940;gfx941;gfx942;gfx1200;gfx1201")
endif()
file(GLOB rocm_cmake_components ${onnxruntime_ROCM_HOME}/lib/cmake/*)

View File

@ -51,6 +51,8 @@ cpu = ["onnxruntime>=1.15.0,<2"]
cuda = ["onnxruntime-gpu>=1.17.0,<2"]
openvino = ["onnxruntime-openvino>=1.17.1,<1.19.0"]
armnn = ["onnxruntime>=1.15.0,<2"]
rknn = ["onnxruntime>=1.15.0,<2", "rknn-toolkit-lite2>=2.3.0,<3"]
rocm = []
[tool.uv]
compile-bytecode = true

View File

@ -2,16 +2,19 @@
echo "Initializing Immich ML $IMMICH_SOURCE_REF"
lib_path="/usr/lib/$(arch)-linux-gnu/libmimalloc.so.2"
# mimalloc seems to increase memory usage dramatically with openvino, need to investigate
if ! [ "$DEVICE" = "openvino" ]; then
export LD_PRELOAD="$lib_path"
export LD_BIND_NOW=1
: "${MACHINE_LEARNING_WORKER_TIMEOUT:=120}"
else
: "${MACHINE_LEARNING_WORKER_TIMEOUT:=300}"
fi
# mimalloc seems to increase memory usage dramatically with openvino, need to investigate
if ! [ "$DEVICE" = "openvino" ] && ! [ "$DEVICE" = "rocm" ]; then
lib_path="/usr/lib/$(arch)-linux-gnu/libmimalloc.so.2"
export LD_PRELOAD="$lib_path"
export LD_BIND_NOW=1
fi
: "${IMMICH_HOST:=[::]}"
: "${IMMICH_PORT:=3003}"
: "${MACHINE_LEARNING_WORKERS:=1}"

View File

@ -1109,6 +1109,10 @@ cuda = [
openvino = [
{ name = "onnxruntime-openvino" },
]
rknn = [
{ name = "onnxruntime" },
{ name = "rknn-toolkit-lite2" },
]
[package.dev-dependencies]
dev = [
@ -1162,6 +1166,7 @@ requires-dist = [
{ name = "insightface", specifier = ">=0.7.3,<1.0" },
{ name = "onnxruntime", marker = "extra == 'armnn'", specifier = ">=1.15.0,<2" },
{ name = "onnxruntime", marker = "extra == 'cpu'", specifier = ">=1.15.0,<2" },
{ name = "onnxruntime", marker = "extra == 'rknn'", specifier = ">=1.15.0,<2" },
{ name = "onnxruntime-gpu", marker = "extra == 'cuda'", specifier = ">=1.17.0,<2", index = "https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/" },
{ name = "onnxruntime-openvino", marker = "extra == 'openvino'", specifier = ">=1.17.1,<1.19.0" },
{ name = "opencv-python-headless", specifier = ">=4.7.0.72,<5.0" },
@ -1171,10 +1176,11 @@ requires-dist = [
{ name = "pydantic-settings", specifier = ">=2.5.2,<3" },
{ name = "python-multipart", specifier = ">=0.0.6,<1.0" },
{ name = "rich", specifier = ">=13.4.2" },
{ name = "rknn-toolkit-lite2", marker = "extra == 'rknn'", specifier = ">=2.3.0,<3" },
{ name = "tokenizers", specifier = ">=0.15.0,<1.0" },
{ name = "uvicorn", extras = ["standard"], specifier = ">=0.22.0,<1.0" },
]
provides-extras = ["cpu", "cuda", "openvino", "armnn"]
provides-extras = ["cpu", "cuda", "openvino", "armnn", "rknn", "rocm"]
[package.metadata.requires-dev]
dev = [
@ -2131,6 +2137,77 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 },
]
[[package]]
name = "rknn-toolkit-lite2"
version = "2.3.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
{ name = "psutil" },
{ name = "ruamel-yaml" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/ed/77/6af374a4a8cd2aee762a1fb8a3050dcf3f129134bbdc4bb6bed755c4325b/rknn_toolkit_lite2-2.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b6733689bd09a262bcb6ba4744e690dd4b37ebeac4ed427cf45242c4b4ce9a4", size = 559372 },
{ url = "https://files.pythonhosted.org/packages/9b/0c/76ff1eb09d09ce4394a6959d2343a321d28dd9e604348ffdafceafdc344c/rknn_toolkit_lite2-2.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3e4fefe355dc34a155680e4bcb9e4abb37ebc271f045ec9e0a4a3a018bc5beb", size = 569149 },
{ url = "https://files.pythonhosted.org/packages/0d/6e/8679562028051b02312212defc6e8c07248953f10dd7ad506e941b575bf3/rknn_toolkit_lite2-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37394371d1561f470c553f39869d7c35ff93405dffe3d0d72babf297a2b0aee9", size = 527457 },
]
[[package]]
name = "ruamel-yaml"
version = "0.18.10"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "ruamel-yaml-clib", marker = "python_full_version < '3.13' and platform_python_implementation == 'CPython'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ea/46/f44d8be06b85bc7c4d8c95d658be2b68f27711f279bf9dd0612a5e4794f5/ruamel.yaml-0.18.10.tar.gz", hash = "sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58", size = 143447 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c2/36/dfc1ebc0081e6d39924a2cc53654497f967a084a436bb64402dfce4254d9/ruamel.yaml-0.18.10-py3-none-any.whl", hash = "sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1", size = 117729 },
]
[[package]]
name = "ruamel-yaml-clib"
version = "0.2.12"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/20/84/80203abff8ea4993a87d823a5f632e4d92831ef75d404c9fc78d0176d2b5/ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f", size = 225315 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/70/57/40a958e863e299f0c74ef32a3bde9f2d1ea8d69669368c0c502a0997f57f/ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5", size = 131301 },
{ url = "https://files.pythonhosted.org/packages/98/a8/29a3eb437b12b95f50a6bcc3d7d7214301c6c529d8fdc227247fa84162b5/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969", size = 633728 },
{ url = "https://files.pythonhosted.org/packages/35/6d/ae05a87a3ad540259c3ad88d71275cbd1c0f2d30ae04c65dcbfb6dcd4b9f/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df", size = 722230 },
{ url = "https://files.pythonhosted.org/packages/7f/b7/20c6f3c0b656fe609675d69bc135c03aac9e3865912444be6339207b6648/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76", size = 686712 },
{ url = "https://files.pythonhosted.org/packages/cd/11/d12dbf683471f888d354dac59593873c2b45feb193c5e3e0f2ebf85e68b9/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6", size = 663936 },
{ url = "https://files.pythonhosted.org/packages/72/14/4c268f5077db5c83f743ee1daeb236269fa8577133a5cfa49f8b382baf13/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd", size = 696580 },
{ url = "https://files.pythonhosted.org/packages/30/fc/8cd12f189c6405a4c1cf37bd633aa740a9538c8e40497c231072d0fef5cf/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a", size = 663393 },
{ url = "https://files.pythonhosted.org/packages/80/29/c0a017b704aaf3cbf704989785cd9c5d5b8ccec2dae6ac0c53833c84e677/ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da", size = 100326 },
{ url = "https://files.pythonhosted.org/packages/3a/65/fa39d74db4e2d0cd252355732d966a460a41cd01c6353b820a0952432839/ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28", size = 118079 },
{ url = "https://files.pythonhosted.org/packages/fb/8f/683c6ad562f558cbc4f7c029abcd9599148c51c54b5ef0f24f2638da9fbb/ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6", size = 132224 },
{ url = "https://files.pythonhosted.org/packages/3c/d2/b79b7d695e2f21da020bd44c782490578f300dd44f0a4c57a92575758a76/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e", size = 641480 },
{ url = "https://files.pythonhosted.org/packages/68/6e/264c50ce2a31473a9fdbf4fa66ca9b2b17c7455b31ef585462343818bd6c/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e", size = 739068 },
{ url = "https://files.pythonhosted.org/packages/86/29/88c2567bc893c84d88b4c48027367c3562ae69121d568e8a3f3a8d363f4d/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52", size = 703012 },
{ url = "https://files.pythonhosted.org/packages/11/46/879763c619b5470820f0cd6ca97d134771e502776bc2b844d2adb6e37753/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642", size = 704352 },
{ url = "https://files.pythonhosted.org/packages/02/80/ece7e6034256a4186bbe50dee28cd032d816974941a6abf6a9d65e4228a7/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2", size = 737344 },
{ url = "https://files.pythonhosted.org/packages/f0/ca/e4106ac7e80efbabdf4bf91d3d32fc424e41418458251712f5672eada9ce/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3", size = 714498 },
{ url = "https://files.pythonhosted.org/packages/67/58/b1f60a1d591b771298ffa0428237afb092c7f29ae23bad93420b1eb10703/ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4", size = 100205 },
{ url = "https://files.pythonhosted.org/packages/b4/4f/b52f634c9548a9291a70dfce26ca7ebce388235c93588a1068028ea23fcc/ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb", size = 118185 },
{ url = "https://files.pythonhosted.org/packages/48/41/e7a405afbdc26af961678474a55373e1b323605a4f5e2ddd4a80ea80f628/ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632", size = 133433 },
{ url = "https://files.pythonhosted.org/packages/ec/b0/b850385604334c2ce90e3ee1013bd911aedf058a934905863a6ea95e9eb4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d", size = 647362 },
{ url = "https://files.pythonhosted.org/packages/44/d0/3f68a86e006448fb6c005aee66565b9eb89014a70c491d70c08de597f8e4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c", size = 754118 },
{ url = "https://files.pythonhosted.org/packages/52/a9/d39f3c5ada0a3bb2870d7db41901125dbe2434fa4f12ca8c5b83a42d7c53/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd", size = 706497 },
{ url = "https://files.pythonhosted.org/packages/b0/fa/097e38135dadd9ac25aecf2a54be17ddf6e4c23e43d538492a90ab3d71c6/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31", size = 698042 },
{ url = "https://files.pythonhosted.org/packages/ec/d5/a659ca6f503b9379b930f13bc6b130c9f176469b73b9834296822a83a132/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680", size = 745831 },
{ url = "https://files.pythonhosted.org/packages/db/5d/36619b61ffa2429eeaefaab4f3374666adf36ad8ac6330d855848d7d36fd/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d", size = 715692 },
{ url = "https://files.pythonhosted.org/packages/b1/82/85cb92f15a4231c89b95dfe08b09eb6adca929ef7df7e17ab59902b6f589/ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5", size = 98777 },
{ url = "https://files.pythonhosted.org/packages/d7/8f/c3654f6f1ddb75daf3922c3d8fc6005b1ab56671ad56ffb874d908bfa668/ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4", size = 115523 },
{ url = "https://files.pythonhosted.org/packages/29/00/4864119668d71a5fa45678f380b5923ff410701565821925c69780356ffa/ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a", size = 132011 },
{ url = "https://files.pythonhosted.org/packages/7f/5e/212f473a93ae78c669ffa0cb051e3fee1139cb2d385d2ae1653d64281507/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475", size = 642488 },
{ url = "https://files.pythonhosted.org/packages/1f/8f/ecfbe2123ade605c49ef769788f79c38ddb1c8fa81e01f4dbf5cf1a44b16/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef", size = 745066 },
{ url = "https://files.pythonhosted.org/packages/e2/a9/28f60726d29dfc01b8decdb385de4ced2ced9faeb37a847bd5cf26836815/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6", size = 701785 },
{ url = "https://files.pythonhosted.org/packages/84/7e/8e7ec45920daa7f76046578e4f677a3215fe8f18ee30a9cb7627a19d9b4c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf", size = 693017 },
{ url = "https://files.pythonhosted.org/packages/c5/b3/d650eaade4ca225f02a648321e1ab835b9d361c60d51150bac49063b83fa/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1", size = 741270 },
{ url = "https://files.pythonhosted.org/packages/87/b8/01c29b924dcbbed75cc45b30c30d565d763b9c4d540545a0eeecffb8f09c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01", size = 709059 },
{ url = "https://files.pythonhosted.org/packages/30/8c/ed73f047a73638257aa9377ad356bea4d96125b305c34a28766f4445cc0f/ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6", size = 98583 },
{ url = "https://files.pythonhosted.org/packages/b0/85/e8e751d8791564dd333d5d9a4eab0a7a115f7e349595417fd50ecae3395c/ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3", size = 115190 },
]
[[package]]
name = "ruff"
version = "0.9.9"

View File

@ -197,7 +197,7 @@
"control_bottom_app_bar_edit_time": "Edit Date & Time",
"control_bottom_app_bar_favorite": "Favorite",
"control_bottom_app_bar_share": "Share",
"control_bottom_app_bar_share_to": "Share To",
"control_bottom_app_bar_share_link": "Share Link",
"control_bottom_app_bar_stack": "Stack",
"control_bottom_app_bar_trash_from_immich": "Move to Trash",
"control_bottom_app_bar_unarchive": "Unarchive",
@ -264,7 +264,9 @@
"exif_bottom_sheet_location_add": "Add a location",
"exif_bottom_sheet_people": "PEOPLE",
"exif_bottom_sheet_person_add_person": "Add name",
"exif_bottom_sheet_person_age": "Age {}",
"exif_bottom_sheet_person_age_years": "Age {}",
"exif_bottom_sheet_person_age_year_months": "Age 1 year, {} months",
"exif_bottom_sheet_person_age_months": "Age {} months",
"experimental_settings_new_asset_list_subtitle": "Work in progress",
"experimental_settings_new_asset_list_title": "Enable experimental photo grid",
"experimental_settings_subtitle": "Use at your own risk!",

View File

@ -4,8 +4,6 @@ import 'package:immich_mobile/domain/models/user.model.dart';
abstract interface class IUserRepository implements IDatabaseRepository {
Future<bool> insert(UserDto user);
Future<UserDto?> get(int id);
Future<UserDto?> getByUserId(String id);
Future<List<UserDto?>> getByUserIds(List<String> ids);
@ -16,7 +14,7 @@ abstract interface class IUserRepository implements IDatabaseRepository {
Future<UserDto> update(UserDto user);
Future<void> delete(List<int> ids);
Future<void> delete(List<String> ids);
Future<void> deleteAll();
}

View File

@ -1,7 +1,5 @@
import 'dart:ui';
import 'package:immich_mobile/utils/hash.dart';
enum AvatarColor {
// do not change this order or reuse indices for other purposes, adding is OK
primary,
@ -32,7 +30,7 @@ enum AvatarColor {
// TODO: Rename to User once Isar is removed
class UserDto {
final String uid;
final String id;
final String email;
final String name;
final bool isAdmin;
@ -50,11 +48,10 @@ class UserDto {
final int quotaUsageInBytes;
final int quotaSizeInBytes;
int get id => fastHash(uid);
bool get hasQuota => quotaSizeInBytes > 0;
const UserDto({
required this.uid,
required this.id,
required this.email,
required this.name,
required this.isAdmin,
@ -73,7 +70,6 @@ class UserDto {
String toString() {
return '''User: {
id: $id,
uid: $uid,
email: $email,
name: $name,
isAdmin: $isAdmin,
@ -90,7 +86,7 @@ quotaSizeInBytes: $quotaSizeInBytes,
}
UserDto copyWith({
String? uid,
String? id,
String? email,
String? name,
bool? isAdmin,
@ -105,7 +101,7 @@ quotaSizeInBytes: $quotaSizeInBytes,
int? quotaSizeInBytes,
}) =>
UserDto(
uid: uid ?? this.uid,
id: id ?? this.id,
email: email ?? this.email,
name: name ?? this.name,
isAdmin: isAdmin ?? this.isAdmin,
@ -124,7 +120,7 @@ quotaSizeInBytes: $quotaSizeInBytes,
bool operator ==(covariant UserDto other) {
if (identical(this, other)) return true;
return other.uid == uid &&
return other.id == id &&
other.updatedAt.isAtSameMomentAs(updatedAt) &&
other.avatarColor == avatarColor &&
other.email == email &&
@ -141,7 +137,7 @@ quotaSizeInBytes: $quotaSizeInBytes,
@override
int get hashCode =>
uid.hashCode ^
id.hashCode ^
name.hashCode ^
email.hashCode ^
updatedAt.hashCode ^
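This file makes the server-issued String the canonical id on UserDto and drops the derived int get id => fastHash(uid) getter, so call sites now hash explicitly wherever an Isar int key is still required. For reference, a sketch of the FNV-1a 64-bit recipe that utils/hash.dart is understood to provide (it matches the hash suggested in the Isar docs; verify against the project's actual implementation before relying on it):

// FNV-1a 64-bit String hash; relies on 64-bit wrapping ints, so Dart VM only (not web).
int fastHash(String string) {
  var hash = 0xcbf29ce484222325;
  var i = 0;
  while (i < string.length) {
    final codeUnit = string.codeUnitAt(i++);
    hash ^= codeUnit >> 8;
    hash *= 0x100000001b3;
    hash ^= codeUnit & 0xff;
    hash *= 0x100000001b3;
  }
  return hash;
}

void main() {
  const id = 'f2c1a0de-1234-4abc-8def-000000000000'; // hypothetical server UUID
  print(fastHash(id)); // stable int key for the Isar users collection
}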

View File

@ -44,10 +44,14 @@ class UserService {
Future<String?> createProfileImage(String name, Uint8List image) async {
try {
return await _userApiRepository.createProfileImage(
final path = await _userApiRepository.createProfileImage(
name: name,
data: image,
);
final updatedUser = getMyUser().copyWith(profileImagePath: path);
await _storeService.put(StoreKey.currentUser, updatedUser);
await _userRepository.update(updatedUser);
return path;
} catch (e) {
_log.warning("Failed to upload profile image", e);
return null;

View File

@ -3,6 +3,7 @@ import 'dart:typed_data';
import 'package:collection/collection.dart';
import 'package:immich_mobile/domain/models/user.model.dart';
import 'package:immich_mobile/entities/asset.entity.dart';
import 'package:immich_mobile/utils/hash.dart';
extension ListExtension<E> on List<E> {
List<E> uniqueConsecutive({
@ -62,11 +63,11 @@ extension AssetListExtension on Iterable<Asset> {
void Function()? errorCallback,
}) {
if (owner == null) return [];
final userId = owner.id;
final bool onlyOwned = every((e) => e.ownerId == userId);
final isarUserId = fastHash(owner.id);
final bool onlyOwned = every((e) => e.ownerId == isarUserId);
if (!onlyOwned) {
if (errorCallback != null) errorCallback();
return where((a) => a.ownerId == userId);
return where((a) => a.ownerId == isarUserId);
}
return this;
}

View File

@ -40,7 +40,7 @@ class User {
});
static User fromDto(UserDto dto) => User(
id: dto.uid,
id: dto.id,
updatedAt: dto.updatedAt,
email: dto.email,
name: dto.name,
@ -56,7 +56,7 @@ class User {
);
UserDto toDto() => UserDto(
uid: id,
id: id,
email: email,
name: name,
isAdmin: isAdmin,

View File

@ -78,7 +78,9 @@ class IsarStoreRepository extends IsarDatabaseRepository
const (DateTime) => entity.intValue == null
? null
: DateTime.fromMillisecondsSinceEpoch(entity.intValue!),
const (UserDto) => await IsarUserRepository(_db).get(entity.intValue!),
const (UserDto) => entity.strValue == null
? null
: await IsarUserRepository(_db).getByUserId(entity.strValue!),
_ => null,
} as T?;
@ -89,8 +91,8 @@ class IsarStoreRepository extends IsarDatabaseRepository
const (bool) => ((value as bool) ? 1 : 0, null),
const (DateTime) => ((value as DateTime).millisecondsSinceEpoch, null),
const (UserDto) => (
(await IsarUserRepository(_db).update(value as UserDto)).id,
null,
(await IsarUserRepository(_db).update(value as UserDto)).id,
),
_ => throw UnsupportedError(
"Unsupported primitive type: ${key.type} for key: ${key.name}",

View File

@ -11,9 +11,9 @@ class IsarUserRepository extends IsarDatabaseRepository
const IsarUserRepository(super.db) : _db = db;
@override
Future<void> delete(List<int> ids) async {
Future<void> delete(List<String> ids) async {
await transaction(() async {
await _db.users.deleteAll(ids);
await _db.users.deleteAllById(ids);
});
}
@ -24,11 +24,6 @@ class IsarUserRepository extends IsarDatabaseRepository
});
}
@override
Future<UserDto?> get(int id) async {
return (await _db.users.get(id))?.toDto();
}
@override
Future<List<UserDto>> getAll({SortUserBy? sortBy}) async {
return (await _db.users

View File

@ -4,7 +4,7 @@ import 'package:openapi/api.dart';
abstract final class UserConverter {
/// Base user dto used where the complete user object is not required
static UserDto fromSimpleUserDto(UserResponseDto dto) => UserDto(
uid: dto.id,
id: dto.id,
email: dto.email,
name: dto.name,
isAdmin: false,
@ -18,7 +18,7 @@ abstract final class UserConverter {
UserPreferencesResponseDto? preferenceDto,
]) =>
UserDto(
uid: adminDto.id,
id: adminDto.id,
email: adminDto.email,
name: adminDto.name,
isAdmin: adminDto.isAdmin,
@ -34,7 +34,7 @@ abstract final class UserConverter {
);
static UserDto fromPartnerDto(PartnerResponseDto dto) => UserDto(
uid: dto.id,
id: dto.id,
email: dto.email,
name: dto.name,
isAdmin: false,

View File

@ -19,7 +19,7 @@ abstract interface class IAssetRepository implements IDatabaseRepository {
);
Future<List<Asset>> getAll({
required int ownerId,
required String ownerId,
AssetState? state,
AssetSort? sortBy,
int? limit,
@ -29,8 +29,8 @@ abstract interface class IAssetRepository implements IDatabaseRepository {
Future<List<Asset>> getByAlbum(
Album album, {
Iterable<int> notOwnedBy = const [],
int? ownerId,
Iterable<String> notOwnedBy = const [],
String? ownerId,
AssetState? state,
AssetSort? sortBy,
});
@ -45,7 +45,7 @@ abstract interface class IAssetRepository implements IDatabaseRepository {
Future<List<Asset>> getMatches({
required List<Asset> assets,
required int ownerId,
required String ownerId,
AssetState? state,
int limit = 100,
});
@ -64,10 +64,10 @@ abstract interface class IAssetRepository implements IDatabaseRepository {
Stream<Asset?> watchAsset(int id, {bool fireImmediately = false});
Future<List<Asset>> getTrashAssets(int userId);
Future<List<Asset>> getTrashAssets(String userId);
Future<List<Asset>> getRecentlyAddedAssets(int userId);
Future<List<Asset>> getMotionAssets(int userId);
Future<List<Asset>> getRecentlyAddedAssets(String userId);
Future<List<Asset>> getMotionAssets(String userId);
}
enum AssetSort { checksum, ownerIdChecksum }

View File

@ -2,7 +2,7 @@ import 'package:immich_mobile/entities/etag.entity.dart';
import 'package:immich_mobile/interfaces/database.interface.dart';
abstract interface class IETagRepository implements IDatabaseRepository {
Future<ETag?> get(int id);
Future<ETag?> get(String id);
Future<ETag?> getById(String id);

View File

@ -3,22 +3,25 @@ import 'package:immich_mobile/entities/asset.entity.dart';
import 'package:immich_mobile/widgets/asset_grid/asset_grid_data_structure.dart';
abstract class ITimelineRepository {
Future<List<int>> getTimelineUserIds(int id);
Future<List<String>> getTimelineUserIds(String id);
Stream<List<int>> watchTimelineUsers(int id);
Stream<List<String>> watchTimelineUsers(String id);
Stream<RenderList> watchArchiveTimeline(int userId);
Stream<RenderList> watchFavoriteTimeline(int userId);
Stream<RenderList> watchTrashTimeline(int userId);
Stream<RenderList> watchArchiveTimeline(String userId);
Stream<RenderList> watchFavoriteTimeline(String userId);
Stream<RenderList> watchTrashTimeline(String userId);
Stream<RenderList> watchAlbumTimeline(
Album album,
GroupAssetsBy groupAssetsBy,
);
Stream<RenderList> watchAllVideosTimeline();
Stream<RenderList> watchHomeTimeline(int userId, GroupAssetsBy groupAssetsBy);
Stream<RenderList> watchHomeTimeline(
String userId,
GroupAssetsBy groupAssetsBy,
);
Stream<RenderList> watchMultiUsersTimeline(
List<int> userIds,
List<String> userIds,
GroupAssetsBy groupAssetsBy,
);
@ -27,5 +30,5 @@ abstract class ITimelineRepository {
GroupAssetsBy getGroupByOption,
);
Stream<RenderList> watchAssetSelectionTimeline(int userId);
Stream<RenderList> watchAssetSelectionTimeline(String userId);
}

View File

@ -26,7 +26,7 @@ class AlbumAdditionalSharedUserSelectionPage extends HookConsumerWidget {
final sharedUsersList = useState<Set<UserDto>>({});
addNewUsersHandler() {
context.maybePop(sharedUsersList.value.map((e) => e.uid).toList());
context.maybePop(sharedUsersList.value.map((e) => e.id).toList());
}
buildTileIcon(UserDto user) {
@ -151,7 +151,7 @@ class AlbumAdditionalSharedUserSelectionPage extends HookConsumerWidget {
onData: (users) {
for (var sharedUsers in album.sharedUsers) {
users.removeWhere(
(u) => u.uid == sharedUsers.id || u.uid == album.ownerId,
(u) => u.id == sharedUsers.id || u.id == album.ownerId,
);
}

View File

@ -85,7 +85,7 @@ class AlbumOptionsPage extends HookConsumerWidget {
void handleUserClick(UserDto user) {
var actions = [];
if (user.uid == userId) {
if (user.id == userId) {
actions = [
ListTile(
leading: const Icon(Icons.exit_to_app_rounded),
@ -170,10 +170,10 @@ class AlbumOptionsPage extends HookConsumerWidget {
color: context.colorScheme.onSurfaceSecondary,
),
),
trailing: userId == user.uid || isOwner
trailing: userId == user.id || isOwner
? const Icon(Icons.more_horiz_rounded)
: const SizedBox(),
onTap: userId == user.uid || isOwner
onTap: userId == user.id || isOwner
? () => handleUserClick(user)
: null,
);

View File

@ -33,7 +33,7 @@ class AlbumsPage extends HookConsumerWidget {
final searchController = useTextEditingController();
final debounceTimer = useRef<Timer?>(null);
final filterMode = useState(QuickFilterMode.all);
final userId = ref.watch(currentUserProvider)?.uid;
final userId = ref.watch(currentUserProvider)?.id;
final searchFocusNode = useFocusNode();
toggleViewMode() {

View File

@ -72,7 +72,7 @@ class ActivitiesPage extends HookConsumerWidget {
final activity = data[index];
final canDelete = activity.user.id == user?.id ||
album.ownerId == user?.uid;
album.ownerId == user?.id;
return Padding(
padding: const EdgeInsets.all(5),

View File

@ -3,11 +3,12 @@ import 'package:flutter_udid/flutter_udid.dart';
import 'package:hooks_riverpod/hooks_riverpod.dart';
import 'package:immich_mobile/domain/models/store.model.dart';
import 'package:immich_mobile/domain/models/user.model.dart';
import 'package:immich_mobile/domain/services/user.service.dart';
import 'package:immich_mobile/entities/store.entity.dart';
import 'package:immich_mobile/infrastructure/utils/user.converter.dart';
import 'package:immich_mobile/models/auth/auth_state.model.dart';
import 'package:immich_mobile/models/auth/login_response.model.dart';
import 'package:immich_mobile/providers/api.provider.dart';
import 'package:immich_mobile/providers/infrastructure/user.provider.dart';
import 'package:immich_mobile/services/api.service.dart';
import 'package:immich_mobile/services/auth.service.dart';
import 'package:immich_mobile/utils/hash.dart';
@ -18,20 +19,20 @@ final authProvider = StateNotifierProvider<AuthNotifier, AuthState>((ref) {
return AuthNotifier(
ref.watch(authServiceProvider),
ref.watch(apiServiceProvider),
ref.watch(userServiceProvider),
);
});
class AuthNotifier extends StateNotifier<AuthState> {
final AuthService _authService;
final ApiService _apiService;
final UserService _userService;
final _log = Logger("AuthenticationNotifier");
static const Duration _timeoutDuration = Duration(seconds: 7);
AuthNotifier(
this._authService,
this._apiService,
) : super(
AuthNotifier(this._authService, this._apiService, this._userService)
: super(
AuthState(
deviceId: "",
userId: "",
@ -106,17 +107,21 @@ class AuthNotifier extends StateNotifier<AuthState> {
String deviceId =
Store.tryGet(StoreKey.deviceId) ?? await FlutterUdid.consistentUdid;
UserDto? user = Store.tryGet(StoreKey.currentUser);
UserDto? user = _userService.tryGetMyUser();
UserAdminResponseDto? userResponse;
UserPreferencesResponseDto? userPreferences;
try {
final responses = await Future.wait([
_apiService.usersApi.getMyUser().timeout(_timeoutDuration),
_apiService.usersApi.getMyPreferences().timeout(_timeoutDuration),
]);
userResponse = responses[0] as UserAdminResponseDto;
userPreferences = responses[1] as UserPreferencesResponseDto;
final serverUser =
await _userService.refreshMyUser().timeout(_timeoutDuration);
if (serverUser == null) {
_log.severe("Unable to get user information from the server.");
} else {
// If the user information is successfully retrieved, update the store
// Due to the flow of the code, this will always happen on first login
user = serverUser;
await Store.put(StoreKey.deviceId, deviceId);
await Store.put(StoreKey.deviceIdHash, fastHash(deviceId));
await Store.put(StoreKey.accessToken, accessToken);
}
} on ApiException catch (error, stackTrace) {
if (error.code == 401) {
_log.severe("Unauthorized access, token likely expired. Logging out.");
@ -140,22 +145,6 @@ class AuthNotifier extends StateNotifier<AuthState> {
}
}
// If the user information is successfully retrieved, update the store
// Due to the flow of the code, this will always happen on first login
if (userResponse == null) {
_log.severe("Unable to get user information from the server.");
} else {
await Store.put(StoreKey.deviceId, deviceId);
await Store.put(StoreKey.deviceIdHash, fastHash(deviceId));
await Store.put(
StoreKey.currentUser,
UserConverter.fromAdminDto(userResponse, userPreferences),
);
await Store.put(StoreKey.accessToken, accessToken);
user = UserConverter.fromAdminDto(userResponse, userPreferences);
}
// If the user is null, the login was not successful
// and we don't have a local copy of the user from a prior successful login
if (user == null) {
@ -163,13 +152,13 @@ class AuthNotifier extends StateNotifier<AuthState> {
}
state = state.copyWith(
isAuthenticated: true,
userId: user.uid,
userEmail: user.email,
name: user.name,
profileImagePath: user.profileImagePath,
isAdmin: user.isAdmin,
deviceId: deviceId,
userId: user.id,
userEmail: user.email,
isAuthenticated: true,
name: user.name,
isAdmin: user.isAdmin,
profileImagePath: user.profileImagePath,
);
return true;
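The rewritten notifier folds the two direct API calls into a single UserService.refreshMyUser() guarded by a timeout, falling back to the locally cached user when the server cannot be reached. A minimal sketch of that flow with stand-in types (the real notifier above also handles ApiException and persists the device id and access token):

import 'dart:async';

// Stand-ins for the app types used in the diff.
class UserDto {
  final String id;
  const UserDto(this.id);
}

abstract class UserService {
  UserDto? tryGetMyUser();
  Future<UserDto?> refreshMyUser();
}

Future<UserDto?> resolveUser(UserService userService) async {
  UserDto? user = userService.tryGetMyUser(); // cached copy, may be null
  try {
    final serverUser =
        await userService.refreshMyUser().timeout(const Duration(seconds: 7));
    if (serverUser != null) user = serverUser; // prefer fresh server data
  } on TimeoutException {
    // keep the cached user when the server does not answer in time
  }
  return user;
}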

View File

@ -5,7 +5,7 @@ import 'package:immich_mobile/providers/locale_provider.dart';
import 'package:immich_mobile/services/timeline.service.dart';
import 'package:immich_mobile/widgets/asset_grid/asset_grid_data_structure.dart';
final singleUserTimelineProvider = StreamProvider.family<RenderList, int?>(
final singleUserTimelineProvider = StreamProvider.family<RenderList, String?>(
(ref, userId) {
if (userId == null) {
return const Stream.empty();
@ -18,7 +18,8 @@ final singleUserTimelineProvider = StreamProvider.family<RenderList, int?>(
dependencies: [localeProvider],
);
final multiUsersTimelineProvider = StreamProvider.family<RenderList, List<int>>(
final multiUsersTimelineProvider =
StreamProvider.family<RenderList, List<String>>(
(ref, userIds) {
ref.watch(localeProvider);
final timelineService = ref.watch(timelineServiceProvider);
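Both families are now keyed by the server String id, so widgets pass user?.id straight through. A usage sketch in a ConsumerWidget (provider name from the diff; the surrounding widget is illustrative):

import 'package:flutter/material.dart';
import 'package:hooks_riverpod/hooks_riverpod.dart';
// plus the app's provider import for singleUserTimelineProvider

class SingleUserTimeline extends ConsumerWidget {
  const SingleUserTimeline({super.key, required this.userId});

  final String? userId; // server-issued String id; null before login

  @override
  Widget build(BuildContext context, WidgetRef ref) {
    // The family key is String? instead of the old int?.
    final timeline = ref.watch(singleUserTimelineProvider(userId));
    return timeline.when(
      data: (_) => const Text('timeline loaded'),
      error: (error, _) => Text('$error'),
      loading: () => const CircularProgressIndicator(),
    );
  }
}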

View File

@ -1,34 +1,24 @@
import 'dart:async';
import 'package:hooks_riverpod/hooks_riverpod.dart';
import 'package:immich_mobile/domain/models/store.model.dart';
import 'package:immich_mobile/domain/models/user.model.dart';
import 'package:immich_mobile/entities/store.entity.dart';
import 'package:immich_mobile/infrastructure/utils/user.converter.dart';
import 'package:immich_mobile/providers/api.provider.dart';
import 'package:immich_mobile/services/api.service.dart';
import 'package:immich_mobile/domain/services/user.service.dart';
import 'package:immich_mobile/providers/infrastructure/user.provider.dart';
import 'package:immich_mobile/services/timeline.service.dart';
class CurrentUserProvider extends StateNotifier<UserDto?> {
CurrentUserProvider(this._apiService) : super(null) {
state = Store.tryGet(StoreKey.currentUser);
CurrentUserProvider(this._userService) : super(null) {
state = _userService.tryGetMyUser();
streamSub =
Store.watch(StoreKey.currentUser).listen((user) => state = user);
_userService.watchMyUser().listen((user) => state = user ?? state);
}
final ApiService _apiService;
final UserService _userService;
late final StreamSubscription<UserDto?> streamSub;
refresh() async {
try {
final user = await _apiService.usersApi.getMyUser();
final userPreferences = await _apiService.usersApi.getMyPreferences();
if (user != null) {
await Store.put(
StoreKey.currentUser,
UserConverter.fromAdminDto(user, userPreferences),
);
}
await _userService.refreshMyUser();
} catch (_) {}
}
@ -41,12 +31,10 @@ class CurrentUserProvider extends StateNotifier<UserDto?> {
final currentUserProvider =
StateNotifierProvider<CurrentUserProvider, UserDto?>((ref) {
return CurrentUserProvider(
ref.watch(apiServiceProvider),
);
return CurrentUserProvider(ref.watch(userServiceProvider));
});
class TimelineUserIdsProvider extends StateNotifier<List<int>> {
class TimelineUserIdsProvider extends StateNotifier<List<String>> {
TimelineUserIdsProvider(this._timelineService) : super([]) {
_timelineService.getTimelineUserIds().then((users) => state = users);
streamSub = _timelineService
@ -54,7 +42,7 @@ class TimelineUserIdsProvider extends StateNotifier<List<int>> {
.listen((users) => state = users);
}
late final StreamSubscription<List<int>> streamSub;
late final StreamSubscription<List<String>> streamSub;
final TimelineService _timelineService;
@override
@ -65,6 +53,6 @@ class TimelineUserIdsProvider extends StateNotifier<List<int>> {
}
final timelineUsersIdsProvider =
StateNotifierProvider<TimelineUserIdsProvider, List<int>>((ref) {
StateNotifierProvider<TimelineUserIdsProvider, List<String>>((ref) {
return TimelineUserIdsProvider(ref.watch(timelineServiceProvider));
});

View File

@ -10,6 +10,7 @@ import 'package:immich_mobile/interfaces/album.interface.dart';
import 'package:immich_mobile/models/albums/album_search.model.dart';
import 'package:immich_mobile/providers/db.provider.dart';
import 'package:immich_mobile/repositories/database.repository.dart';
import 'package:immich_mobile/utils/hash.dart';
import 'package:isar/isar.dart';
final albumRepositoryProvider =
@ -43,14 +44,11 @@ class AlbumRepository extends DatabaseRepository implements IAlbumRepository {
if (shared != null) {
query = query.sharedEqualTo(shared);
}
final isarUserId = fastHash(Store.get(StoreKey.currentUser).id);
if (owner == true) {
query = query.owner(
(q) => q.isarIdEqualTo(Store.get(StoreKey.currentUser).id),
);
query = query.owner((q) => q.isarIdEqualTo(isarUserId));
} else if (owner == false) {
query = query.owner(
(q) => q.not().isarIdEqualTo(Store.get(StoreKey.currentUser).id),
);
query = query.owner((q) => q.not().isarIdEqualTo(isarUserId));
}
if (remote == true) {
query = query.localIdIsNull();
@ -140,16 +138,13 @@ class AlbumRepository extends DatabaseRepository implements IAlbumRepository {
.filter()
.nameContains(searchTerm, caseSensitive: false)
.remoteIdIsNotNull();
final isarUserId = fastHash(Store.get(StoreKey.currentUser).id);
switch (filterMode) {
case QuickFilterMode.sharedWithMe:
query = query.owner(
(q) => q.not().isarIdEqualTo(Store.get(StoreKey.currentUser).id),
);
query = query.owner((q) => q.not().isarIdEqualTo(isarUserId));
case QuickFilterMode.myAlbums:
query = query.owner(
(q) => q.isarIdEqualTo(Store.get(StoreKey.currentUser).id),
);
query = query.owner((q) => q.isarIdEqualTo(isarUserId));
case QuickFilterMode.all:
break;
}

View File

@ -11,6 +11,7 @@ import 'package:immich_mobile/infrastructure/entities/exif.entity.dart';
import 'package:immich_mobile/interfaces/asset.interface.dart';
import 'package:immich_mobile/providers/db.provider.dart';
import 'package:immich_mobile/repositories/database.repository.dart';
import 'package:immich_mobile/utils/hash.dart';
import 'package:isar/isar.dart';
final assetRepositoryProvider =
@ -22,20 +23,21 @@ class AssetRepository extends DatabaseRepository implements IAssetRepository {
@override
Future<List<Asset>> getByAlbum(
Album album, {
Iterable<int> notOwnedBy = const [],
int? ownerId,
Iterable<String> notOwnedBy = const [],
String? ownerId,
AssetState? state,
AssetSort? sortBy,
}) {
var query = album.assets.filter();
final isarUserIds = notOwnedBy.map(fastHash).toList();
if (notOwnedBy.length == 1) {
query = query.not().ownerIdEqualTo(notOwnedBy.first);
query = query.not().ownerIdEqualTo(isarUserIds.first);
} else if (notOwnedBy.isNotEmpty) {
query =
query.not().anyOf(notOwnedBy, (q, int id) => q.ownerIdEqualTo(id));
query.not().anyOf(isarUserIds, (q, int id) => q.ownerIdEqualTo(id));
}
if (ownerId != null) {
query = query.ownerIdEqualTo(ownerId);
query = query.ownerIdEqualTo(fastHash(ownerId));
}
if (state != null) {
@ -87,27 +89,28 @@ class AssetRepository extends DatabaseRepository implements IAssetRepository {
@override
Future<List<Asset>> getAll({
required int ownerId,
required String ownerId,
AssetState? state,
AssetSort? sortBy,
int? limit,
}) {
final baseQuery = db.assets.where();
final isarUserId = fastHash(ownerId);
final QueryBuilder<Asset, Asset, QAfterFilterCondition> filteredQuery =
switch (state) {
null => baseQuery.ownerIdEqualToAnyChecksum(ownerId).noOp(),
null => baseQuery.ownerIdEqualToAnyChecksum(isarUserId).noOp(),
AssetState.local => baseQuery
.remoteIdIsNull()
.filter()
.localIdIsNotNull()
.ownerIdEqualTo(ownerId),
.ownerIdEqualTo(isarUserId),
AssetState.remote => baseQuery
.localIdIsNull()
.filter()
.remoteIdIsNotNull()
.ownerIdEqualTo(ownerId),
.ownerIdEqualTo(isarUserId),
AssetState.merged => baseQuery
.ownerIdEqualToAnyChecksum(ownerId)
.ownerIdEqualToAnyChecksum(isarUserId)
.filter()
.remoteIdIsNotNull()
.localIdIsNotNull(),
@ -132,7 +135,7 @@ class AssetRepository extends DatabaseRepository implements IAssetRepository {
@override
Future<List<Asset>> getMatches({
required List<Asset> assets,
required int ownerId,
required String ownerId,
AssetState? state,
int limit = 100,
}) {
@ -147,7 +150,7 @@ class AssetRepository extends DatabaseRepository implements IAssetRepository {
AssetState.merged =>
baseQuery.localIdIsNotNull().filter().remoteIdIsNotNull(),
};
return _getMatchesImpl(query, ownerId, assets, limit);
return _getMatchesImpl(query, fastHash(ownerId), assets, limit);
}
@override
@ -185,10 +188,10 @@ class AssetRepository extends DatabaseRepository implements IAssetRepository {
@override
Future<List<Asset?>> getAllByOwnerIdChecksum(
List<int> ids,
List<int> ownerIds,
List<String> checksums,
) =>
db.assets.getAllByOwnerIdChecksum(ids, checksums);
db.assets.getAllByOwnerIdChecksum(ownerIds, checksums);
@override
Future<List<Asset>> getAllLocal() =>
@ -224,30 +227,30 @@ class AssetRepository extends DatabaseRepository implements IAssetRepository {
}
@override
Future<List<Asset>> getTrashAssets(int userId) {
Future<List<Asset>> getTrashAssets(String userId) {
return db.assets
.where()
.remoteIdIsNotNull()
.filter()
.ownerIdEqualTo(userId)
.ownerIdEqualTo(fastHash(userId))
.isTrashedEqualTo(true)
.findAll();
}
@override
Future<List<Asset>> getRecentlyAddedAssets(int userId) {
Future<List<Asset>> getRecentlyAddedAssets(String userId) {
return db.assets
.where()
.ownerIdEqualToAnyChecksum(userId)
.ownerIdEqualToAnyChecksum(fastHash(userId))
.sortByFileCreatedAtDesc()
.findAll();
}
@override
Future<List<Asset>> getMotionAssets(int userId) {
Future<List<Asset>> getMotionAssets(String userId) {
return db.assets
.where()
.ownerIdEqualToAnyChecksum(userId)
.ownerIdEqualToAnyChecksum(fastHash(userId))
.filter()
.livePhotoVideoIdIsNotNull()
.findAll();

View File

@ -4,6 +4,7 @@ import 'package:immich_mobile/domain/models/store.model.dart';
import 'package:immich_mobile/entities/asset.entity.dart';
import 'package:immich_mobile/entities/store.entity.dart';
import 'package:immich_mobile/interfaces/asset_media.interface.dart';
import 'package:immich_mobile/utils/hash.dart';
import 'package:photo_manager/photo_manager.dart' hide AssetType;
final assetMediaRepositoryProvider = Provider((ref) => AssetMediaRepository());
@ -24,7 +25,7 @@ class AssetMediaRepository implements IAssetMediaRepository {
final Asset asset = Asset(
checksum: "",
localId: local.id,
ownerId: Store.get(StoreKey.currentUser).id,
ownerId: fastHash(Store.get(StoreKey.currentUser).id),
fileCreatedAt: local.createDateTime,
fileModifiedAt: local.modifiedDateTime,
updatedAt: local.modifiedDateTime,

View File

@ -15,7 +15,7 @@ class ETagRepository extends DatabaseRepository implements IETagRepository {
Future<List<String>> getAllIds() => db.eTags.where().idProperty().findAll();
@override
Future<ETag?> get(int id) => db.eTags.get(id);
Future<ETag?> get(String id) => db.eTags.getById(id);
@override
Future<void> upsertAll(List<ETag> etags) => txn(() => db.eTags.putAll(etags));

View File

@ -6,6 +6,7 @@ import 'package:immich_mobile/infrastructure/entities/user.entity.dart';
import 'package:immich_mobile/interfaces/timeline.interface.dart';
import 'package:immich_mobile/providers/db.provider.dart';
import 'package:immich_mobile/repositories/database.repository.dart';
import 'package:immich_mobile/utils/hash.dart';
import 'package:immich_mobile/widgets/asset_grid/asset_grid_data_structure.dart';
import 'package:isar/isar.dart';
@ -17,32 +18,32 @@ class TimelineRepository extends DatabaseRepository
TimelineRepository(super.db);
@override
Future<List<int>> getTimelineUserIds(int id) {
Future<List<String>> getTimelineUserIds(String id) {
return db.users
.filter()
.inTimelineEqualTo(true)
.or()
.isarIdEqualTo(id)
.isarIdProperty()
.idEqualTo(id)
.idProperty()
.findAll();
}
@override
Stream<List<int>> watchTimelineUsers(int id) {
Stream<List<String>> watchTimelineUsers(String id) {
return db.users
.filter()
.inTimelineEqualTo(true)
.or()
.isarIdEqualTo(id)
.isarIdProperty()
.idEqualTo(id)
.idProperty()
.watch();
}
@override
Stream<RenderList> watchArchiveTimeline(int userId) {
Stream<RenderList> watchArchiveTimeline(String userId) {
final query = db.assets
.where()
.ownerIdEqualToAnyChecksum(userId)
.ownerIdEqualToAnyChecksum(fastHash(userId))
.filter()
.isArchivedEqualTo(true)
.isTrashedEqualTo(false)
@ -52,10 +53,10 @@ class TimelineRepository extends DatabaseRepository
}
@override
Stream<RenderList> watchFavoriteTimeline(int userId) {
Stream<RenderList> watchFavoriteTimeline(String userId) {
final query = db.assets
.where()
.ownerIdEqualToAnyChecksum(userId)
.ownerIdEqualToAnyChecksum(fastHash(userId))
.filter()
.isFavoriteEqualTo(true)
.isTrashedEqualTo(false)
@ -79,10 +80,10 @@ class TimelineRepository extends DatabaseRepository
}
@override
Stream<RenderList> watchTrashTimeline(int userId) {
Stream<RenderList> watchTrashTimeline(String userId) {
final query = db.assets
.filter()
.ownerIdEqualTo(userId)
.ownerIdEqualTo(fastHash(userId))
.isTrashedEqualTo(true)
.sortByFileCreatedAtDesc();
@ -103,12 +104,12 @@ class TimelineRepository extends DatabaseRepository
@override
Stream<RenderList> watchHomeTimeline(
int userId,
String userId,
GroupAssetsBy groupAssetByOption,
) {
final query = db.assets
.where()
.ownerIdEqualToAnyChecksum(userId)
.ownerIdEqualToAnyChecksum(fastHash(userId))
.filter()
.isArchivedEqualTo(false)
.isTrashedEqualTo(false)
@ -120,12 +121,13 @@ class TimelineRepository extends DatabaseRepository
@override
Stream<RenderList> watchMultiUsersTimeline(
List<int> userIds,
List<String> userIds,
GroupAssetsBy groupAssetByOption,
) {
final isarUserIds = userIds.map(fastHash).toList();
final query = db.assets
.where()
.anyOf(userIds, (qb, userId) => qb.ownerIdEqualToAnyChecksum(userId))
.anyOf(isarUserIds, (qb, id) => qb.ownerIdEqualToAnyChecksum(id))
.filter()
.isArchivedEqualTo(false)
.isTrashedEqualTo(false)
@ -143,12 +145,12 @@ class TimelineRepository extends DatabaseRepository
}
@override
Stream<RenderList> watchAssetSelectionTimeline(int userId) {
Stream<RenderList> watchAssetSelectionTimeline(String userId) {
final query = db.assets
.where()
.remoteIdIsNotNull()
.filter()
.ownerIdEqualTo(userId)
.ownerIdEqualTo(fastHash(userId))
.isTrashedEqualTo(false)
.stackPrimaryAssetIdIsNull()
.sortByFileCreatedAtDesc();

View File

@ -1,11 +1,8 @@
import 'package:auto_route/auto_route.dart';
import 'package:flutter/foundation.dart';
import 'package:hooks_riverpod/hooks_riverpod.dart';
import 'package:immich_mobile/domain/models/store.model.dart';
import 'package:immich_mobile/entities/store.entity.dart';
import 'package:immich_mobile/infrastructure/utils/user.converter.dart';
import 'package:immich_mobile/providers/api.provider.dart';
import 'package:immich_mobile/providers/asset.provider.dart';
import 'package:immich_mobile/providers/infrastructure/user.provider.dart';
import 'package:immich_mobile/providers/memory.provider.dart';
import 'package:immich_mobile/providers/server_info.provider.dart';
@ -28,19 +25,7 @@ class TabNavigationObserver extends AutoRouterObserver {
// Update user info
try {
final userResponseDto =
await ref.read(apiServiceProvider).usersApi.getMyUser();
final userPreferences =
await ref.read(apiServiceProvider).usersApi.getMyPreferences();
if (userResponseDto == null) {
return;
}
await Store.put(
StoreKey.currentUser,
UserConverter.fromAdminDto(userResponseDto, userPreferences),
);
ref.read(userServiceProvider).refreshMyUser();
ref.read(serverInfoProvider.notifier).getServerVersion();
} catch (e) {
debugPrint("Error refreshing user info $e");

View File

@ -6,12 +6,11 @@ import 'package:collection/collection.dart';
import 'package:flutter/foundation.dart';
import 'package:hooks_riverpod/hooks_riverpod.dart';
import 'package:immich_mobile/constants/enums.dart';
import 'package:immich_mobile/domain/models/store.model.dart';
import 'package:immich_mobile/domain/models/user.model.dart';
import 'package:immich_mobile/domain/services/user.service.dart';
import 'package:immich_mobile/entities/album.entity.dart';
import 'package:immich_mobile/entities/asset.entity.dart';
import 'package:immich_mobile/entities/backup_album.entity.dart';
import 'package:immich_mobile/entities/store.entity.dart';
import 'package:immich_mobile/infrastructure/entities/user.entity.dart'
as entity;
import 'package:immich_mobile/interfaces/album.interface.dart';
@ -21,6 +20,7 @@ import 'package:immich_mobile/interfaces/asset.interface.dart';
import 'package:immich_mobile/interfaces/backup_album.interface.dart';
import 'package:immich_mobile/models/albums/album_add_asset_response.model.dart';
import 'package:immich_mobile/models/albums/album_search.model.dart';
import 'package:immich_mobile/providers/infrastructure/user.provider.dart';
import 'package:immich_mobile/repositories/album.repository.dart';
import 'package:immich_mobile/repositories/album_api.repository.dart';
import 'package:immich_mobile/repositories/album_media.repository.dart';
@ -28,11 +28,13 @@ import 'package:immich_mobile/repositories/asset.repository.dart';
import 'package:immich_mobile/repositories/backup.repository.dart';
import 'package:immich_mobile/services/entity.service.dart';
import 'package:immich_mobile/services/sync.service.dart';
import 'package:immich_mobile/utils/hash.dart';
import 'package:logging/logging.dart';
final albumServiceProvider = Provider(
(ref) => AlbumService(
ref.watch(syncServiceProvider),
ref.watch(userServiceProvider),
ref.watch(entityServiceProvider),
ref.watch(albumRepositoryProvider),
ref.watch(assetRepositoryProvider),
@ -44,6 +46,7 @@ final albumServiceProvider = Provider(
class AlbumService {
final SyncService _syncService;
final UserService _userService;
final EntityService _entityService;
final IAlbumRepository _albumRepository;
final IAssetRepository _assetRepository;
@ -56,6 +59,7 @@ class AlbumService {
AlbumService(
this._syncService,
this._userService,
this._entityService,
this._albumRepository,
this._assetRepository,
@ -205,7 +209,7 @@ class AlbumService {
final Album album = await _albumApiRepository.create(
albumName,
assetIds: assets.map((asset) => asset.remoteId!),
sharedUserIds: sharedUsers.map((user) => user.uid),
sharedUserIds: sharedUsers.map((user) => user.id),
);
await _entityService.fillAlbumWithDatabaseEntities(album);
return _albumRepository.create(album);
@ -292,8 +296,8 @@ class AlbumService {
Future<bool> deleteAlbum(Album album) async {
try {
final userId = Store.get(StoreKey.currentUser).id;
if (album.owner.value?.isarId == userId) {
final userId = _userService.getMyUser().id;
if (album.owner.value?.isarId == fastHash(userId)) {
await _albumApiRepository.delete(album.remoteId!);
}
if (album.shared) {
@ -359,7 +363,7 @@ class AlbumService {
try {
await _albumApiRepository.removeUser(
album.remoteId!,
userId: user.uid,
userId: user.id,
);
album.sharedUsers.remove(entity.User.fromDto(user));
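Album ownership is still linked locally through the Isar int id, while the service layer now works with the server String id, so the ownership test goes through fastHash. A one-line sketch of the comparison (fastHash as sketched earlier; the signature is assumed):

// True when the locally stored owner link hashes back to my String id.
bool ownsAlbum(int? ownerIsarId, String myUserId) =>
    ownerIsarId == fastHash(myUserId);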

View File

@ -35,6 +35,9 @@ class ApiService implements Authentication {
late MemoriesApi memoriesApi;
ApiService() {
// The line below ensures that the API clients are initialized when the service is instantiated
// This is required to avoid late initialization errors when the clients are accessed before the endpoint is resolved
setEndpoint('');
final endpoint = Store.tryGet(StoreKey.serverEndpoint);
if (endpoint != null && endpoint.isNotEmpty) {
setEndpoint(endpoint);
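A hedged sketch of the pattern the new comment describes, with a stand-in UsersApi: assigning every `late` client once in the constructor, via a placeholder endpoint, guarantees reads never hit an unassigned field:

// Stand-in for a generated API client.
class UsersApi {
  UsersApi(this.endpoint);
  final String endpoint;
}

class ApiService {
  late UsersApi usersApi; // `late` fields throw if read before assignment

  ApiService() {
    setEndpoint(''); // placeholder write prevents LateInitializationError
  }

  void setEndpoint(String endpoint) {
    usersApi = UsersApi(endpoint);
  }
}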

View File

@ -6,9 +6,8 @@ import 'package:flutter/material.dart';
import 'package:hooks_riverpod/hooks_riverpod.dart';
import 'package:immich_mobile/domain/interfaces/exif.interface.dart';
import 'package:immich_mobile/domain/interfaces/user.interface.dart';
import 'package:immich_mobile/domain/models/store.model.dart';
import 'package:immich_mobile/domain/models/user.model.dart';
import 'package:immich_mobile/domain/services/store.service.dart';
import 'package:immich_mobile/domain/services/user.service.dart';
import 'package:immich_mobile/entities/asset.entity.dart';
import 'package:immich_mobile/entities/backup_album.entity.dart';
import 'package:immich_mobile/interfaces/asset.interface.dart';
@ -19,9 +18,7 @@ import 'package:immich_mobile/interfaces/etag.interface.dart';
import 'package:immich_mobile/models/backup/backup_candidate.model.dart';
import 'package:immich_mobile/providers/api.provider.dart';
import 'package:immich_mobile/providers/infrastructure/exif.provider.dart';
import 'package:immich_mobile/providers/infrastructure/store.provider.dart';
import 'package:immich_mobile/providers/infrastructure/user.provider.dart'
hide userServiceProvider;
import 'package:immich_mobile/providers/infrastructure/user.provider.dart';
import 'package:immich_mobile/repositories/asset.repository.dart';
import 'package:immich_mobile/repositories/asset_api.repository.dart';
import 'package:immich_mobile/repositories/asset_media.repository.dart';
@ -47,7 +44,7 @@ final assetServiceProvider = Provider(
ref.watch(syncServiceProvider),
ref.watch(backupServiceProvider),
ref.watch(albumServiceProvider),
ref.watch(storeServiceProvider),
ref.watch(userServiceProvider),
ref.watch(assetMediaRepositoryProvider),
),
);
@ -63,7 +60,7 @@ class AssetService {
final SyncService _syncService;
final BackupService _backupService;
final AlbumService _albumService;
final StoreService _storeService;
final UserService _userService;
final IAssetMediaRepository _assetMediaRepository;
final log = Logger('AssetService');
@ -78,7 +75,7 @@ class AssetService {
this._syncService,
this._backupService,
this._albumService,
this._storeService,
this._userService,
this._assetMediaRepository,
);
@ -104,7 +101,7 @@ class AssetService {
_getRemoteAssetChanges(List<UserDto> users, DateTime since) async {
final dto = AssetDeltaSyncDto(
updatedAfter: since,
userIds: users.map((e) => e.uid).toList(),
userIds: users.map((e) => e.id).toList(),
);
final changes = await _apiService.syncApi.getDeltaSync(dto);
return changes == null || changes.needsFullSync
@ -145,7 +142,7 @@ class AssetService {
limit: chunkSize,
updatedUntil: until,
lastId: lastId,
userId: user.uid,
userId: user.id,
);
log.fine("Requesting $chunkSize assets from $lastId");
final List<AssetResponseDto>? assets =
@ -316,7 +313,7 @@ class AssetService {
);
await refreshRemoteAssets();
final owner = _storeService.get(StoreKey.currentUser);
final owner = _userService.getMyUser();
final remoteAssets = await _assetRepository.getAll(
ownerId: owner.id,
state: AssetState.merged,
@ -522,12 +519,12 @@ class AssetService {
}
Future<List<Asset>> getRecentlyAddedAssets() {
final me = _storeService.get(StoreKey.currentUser);
final me = _userService.getMyUser();
return _assetRepository.getRecentlyAddedAssets(me.id);
}
Future<List<Asset>> getMotionAssets() {
final me = _storeService.get(StoreKey.currentUser);
final me = _userService.getMyUser();
return _assetRepository.getMotionAssets(me.id);
}
}

Some files were not shown because too many files have changed in this diff