feat: Use postgres as a queue

We've been keen to try this for a while as it means we can remove redis as a
dependency, which makes Immich easier to set up and run.

This replaces bullmq with a bespoke postgres queue. Jobs in the queue are
processed either immediately via triggers and notifications, or eventually if a
notification is missed.
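
For the curious, here is the pattern reduced to a minimal sketch: a trigger on the jobs table NOTIFYs a channel whenever a row is inserted, a worker LISTENs on that channel so new jobs are picked up immediately, and a periodic poll catches any job whose notification was missed. This illustrates the general technique only — it is not the code in this commit (which pulls in graphile-worker, per the dependency changes below) — and the table, channel, and function names are made up.

```typescript
import { Client } from 'pg';

// One-time setup (normally a migration): a jobs table plus a trigger that
// notifies a channel whenever a job is inserted. All names are illustrative.
const setupSql = `
  CREATE TABLE IF NOT EXISTS jobs (
    id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
    payload jsonb NOT NULL,
    created_at timestamptz NOT NULL DEFAULT now()
  );

  CREATE OR REPLACE FUNCTION notify_job_inserted() RETURNS trigger AS $$
  BEGIN
    PERFORM pg_notify('job_inserted', NEW.id::text);
    RETURN NEW;
  END $$ LANGUAGE plpgsql;

  CREATE OR REPLACE TRIGGER job_inserted_trigger
    AFTER INSERT ON jobs
    FOR EACH ROW EXECUTE FUNCTION notify_job_inserted();
`;

async function runWorker() {
  const client = new Client({ connectionString: process.env.DB_URL });
  await client.connect();
  await client.query(setupSql);

  // Immediate path: the trigger wakes the worker as soon as a job is queued.
  await client.query('LISTEN job_inserted');
  client.on('notification', () => void drainQueue(client));

  // Eventual path: poll on an interval, so a notification missed while the
  // worker was down or busy only delays a job rather than losing it.
  setInterval(() => void drainQueue(client), 10_000);
}

async function drainQueue(client: Client) {
  for (;;) {
    // SKIP LOCKED lets several workers pull jobs without blocking each other.
    // Deleting up front keeps the sketch short; a production queue would lock
    // the row and delete it only after the handler succeeds.
    const { rows } = await client.query(
      `DELETE FROM jobs
       WHERE id = (
         SELECT id FROM jobs ORDER BY created_at FOR UPDATE SKIP LOCKED LIMIT 1
       )
       RETURNING id, payload`,
    );
    if (rows.length === 0) {
      return;
    }
    await handleJob(rows[0]);
  }
}

async function handleJob(job: { id: string; payload: unknown }) {
  // Application-specific processing goes here.
  console.log('processing job', job.id);
}

runWorker().catch((error) => {
  console.error(error);
  process.exit(1);
});
```

With this shape, any process that inserts a row into `jobs` gets it processed: promptly when the notification lands, and within one poll interval when it doesn't.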
Thomas Way 2025-04-30 22:42:18 +01:00
parent b845184c80
commit d46e5f2436
47 changed files with 751 additions and 933 deletions

@@ -33,6 +33,7 @@ services:
       - ${UPLOAD_LOCATION}/photos/upload:/usr/src/app/upload/upload
       - /usr/src/app/node_modules
       - /etc/localtime:/etc/localtime:ro
+      - ../flickr30k-images:/flickr30k:ro
     env_file:
       - .env
     environment:
@@ -58,7 +59,6 @@ services:
       - 9231:9231
       - 2283:2283
     depends_on:
-      - redis
       - database
     healthcheck:
       disable: false
@@ -114,12 +114,6 @@ services:
     healthcheck:
       disable: false

-  redis:
-    container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:c855f98e09d558a0d7cc1a4e56473231206a4c54c0114ada9c485b47aeb92ec8
-    healthcheck:
-      test: redis-cli ping || exit 1
-
   database:
     container_name: immich_postgres
     image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
@@ -154,25 +148,25 @@ services:
       -c wal_compression=on

   # set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
-  # immich-prometheus:
-  #   container_name: immich_prometheus
-  #   ports:
-  #     - 9090:9090
-  #   image: prom/prometheus
-  #   volumes:
-  #     - ./prometheus.yml:/etc/prometheus/prometheus.yml
-  #     - prometheus-data:/prometheus
+  immich-prometheus:
+    container_name: immich_prometheus
+    ports:
+      - 9090:9090
+    image: prom/prometheus
+    volumes:
+      - ./prometheus.yml:/etc/prometheus/prometheus.yml
+      - prometheus-data:/prometheus

   # first login uses admin/admin
   # add data source for http://immich-prometheus:9090 to get started
-  # immich-grafana:
-  #   container_name: immich_grafana
-  #   command: ['./run.sh', '-disable-reporting']
-  #   ports:
-  #     - 3000:3000
-  #   image: grafana/grafana:10.3.3-ubuntu
-  #   volumes:
-  #     - grafana-data:/var/lib/grafana
+  immich-grafana:
+    container_name: immich_grafana
+    command: ['./run.sh', '-disable-reporting']
+    ports:
+      - 3001:3000
+    image: grafana/grafana:10.3.3-ubuntu
+    volumes:
+      - grafana-data:/var/lib/grafana

 volumes:
   model-cache:

@@ -27,7 +27,6 @@ services:
     ports:
       - 2283:2283
     depends_on:
-      - redis
       - database
     restart: always
     healthcheck:
@@ -54,13 +53,6 @@ services:
     healthcheck:
       disable: false

-  redis:
-    container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:c855f98e09d558a0d7cc1a4e56473231206a4c54c0114ada9c485b47aeb92ec8
-    healthcheck:
-      test: redis-cli ping || exit 1
-    restart: always
-
   database:
     container_name: immich_postgres
     image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52

@@ -25,7 +25,6 @@ services:
     ports:
       - '2283:2283'
     depends_on:
-      - redis
       - database
     restart: always
     healthcheck:
@@ -47,13 +46,6 @@ services:
     healthcheck:
       disable: false

-  redis:
-    container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:c855f98e09d558a0d7cc1a4e56473231206a4c54c0114ada9c485b47aeb92ec8
-    healthcheck:
-      test: redis-cli ping || exit 1
-    restart: always
-
   database:
     container_name: immich_postgres
     image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52

@@ -1,12 +1,14 @@
 global:
-  scrape_interval: 15s
-  evaluation_interval: 15s
+  scrape_interval: 3s
+  evaluation_interval: 3s

 scrape_configs:
   - job_name: immich_api
+    scrape_interval: 3s
     static_configs:
-      - targets: ['immich-server:8081']
+      - targets: ["immich-server:8081"]
   - job_name: immich_microservices
+    scrape_interval: 3s
     static_configs:
-      - targets: ['immich-server:8082']
+      - targets: ["immich-server:8082"]

@@ -367,12 +367,6 @@ You need to [enable WebSockets](/docs/administration/reverse-proxy/) on your rev
 Immich components are typically deployed using docker. To see logs for deployed docker containers, you can use the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/), specifically the `docker logs` command. For examples, see [Docker Help](/docs/guides/docker-help.md).

-### How can I reduce the log verbosity of Redis?
-
-To decrease Redis logs, you can add the following line to the `redis:` section of the `docker-compose.yml`:
-
-` command: redis-server --loglevel warning`
-
 ### How can I run Immich as a non-root user?

 You can change the user in the container by setting the `user` argument in `docker-compose.yml` for each service.
@@ -380,7 +374,6 @@ You may need to add mount points or docker volumes for the following internal co
 - `immich-machine-learning:/.config`
 - `immich-machine-learning:/.cache`
-- `redis:/data`

 The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION` and `/cache` for machine-learning.

@@ -13,7 +13,7 @@ Immich uses a traditional client-server design, with a dedicated database for da
 <img alt="Immich Architecture" src={AppArchitecture} className="p-4 dark:bg-immich-dark-primary my-4" />

-The diagram shows clients communicating with the server's API via REST. The server communicates with downstream systems (i.e. Redis, Postgres, Machine Learning, file system) through repository interfaces. Not shown in the diagram is that the server is split into two separate containers, `immich-server` and `immich-microservices`. The microservices container does not handle API requests or schedule cron jobs, but primarily handles incoming job requests from Redis.
+The diagram shows clients communicating with the server's API via REST. The server communicates with downstream systems (i.e. Postgres, Machine Learning, file system) through repository interfaces. Not shown in the diagram is that the server is split into two separate containers, `immich-server` and `immich-microservices`. The microservices container does not handle API requests or schedule cron jobs, but primarily handles incoming job requests from Postgres.

 ## Clients
@@ -53,7 +53,6 @@ The Immich backend is divided into several services, which are run as individual
 1. `immich-server` - Handle and respond to REST API requests, execute background jobs (thumbnail generation, metadata extraction, transcoding, etc.)
 1. `immich-machine-learning` - Execute machine learning models
 1. `postgres` - Persistent data storage
-1. `redis` - Queue management for background jobs

 ### Immich Server
@@ -111,7 +110,3 @@ Immich persists data in Postgres, which includes information about access and au
 :::info
 See [Database Migrations](./database-migrations.md) for more information about how to modify the database to create an index, modify a table, add a new column, etc.
 :::
-
-### Redis
-
-Immich uses [Redis](https://redis.com/) via [BullMQ](https://docs.bullmq.io/) to manage job queues. Some jobs trigger subsequent jobs. For example, Smart Search and Facial Recognition rely on thumbnail generation and automatically run after one is generated.

@@ -23,7 +23,6 @@ This environment includes the services below. Additional details are available i
 - Server - [`/server`](https://github.com/immich-app/immich/tree/main/server)
 - Web app - [`/web`](https://github.com/immich-app/immich/tree/main/web)
 - Machine learning - [`/machine-learning`](https://github.com/immich-app/immich/tree/main/machine-learning)
-- Redis
 - PostgreSQL development database with exposed port `5432` so you can use any database client to access it

 All the services are packaged to run with a single Docker Compose command.

@@ -1,6 +1,6 @@
 # Scaling Immich

-Immich is built with modern deployment practices in mind, and the backend is designed to be able to run multiple instances in parallel. When doing this, the only requirement you need to be aware of is that every instance needs to be connected to the shared infrastructure. That means they should all have access to the same Postgres and Redis instances, and have the same files mounted into the containers.
+Immich is built with modern deployment practices in mind, and the backend is designed to be able to run multiple instances in parallel. When doing this, the only requirement you need to be aware of is that every instance needs to be connected to the shared infrastructure. That means they should all have access to the same Postgres instance, and have the same files mounted into the containers.

 Scaling can be useful for many reasons. Maybe you have a gaming PC that you want to use for transcoding and thumbnail generation, or perhaps you run a Kubernetes cluster across a handful of powerful servers that you want to make use of.
@@ -16,4 +16,4 @@ By default, each running `immich-server` container comes with multiple internal
 ## Scaling down

-In the same way you can scale up to multiple containers, you can also choose to scale down. All state is stored in Postgres, Redis, and the filesystem so there is no risk in stopping a running immich-server container, for example if you want to use your GPU to play some games. As long as there is an API worker running you will still be able to browse Immich, and jobs will wait to be processed until there is a worker available for them.
+In the same way you can scale up to multiple containers, you can also choose to scale down. All state is stored in Postgres and the filesystem so there is no risk in stopping a running immich-server container, for example if you want to use your GPU to play some games. As long as there is an API worker running you will still be able to browse Immich, and jobs will wait to be processed until there is a worker available for them.

@@ -98,54 +98,6 @@ When `DB_URL` is defined, the `DB_HOSTNAME`, `DB_PORT`, `DB_USERNAME`, `DB_PASSW
 :::

-## Redis
-
-| Variable | Description | Default | Containers |
-| :--------------- | :------------- | :-----: | :--------- |
-| `REDIS_URL` | Redis URL | | server |
-| `REDIS_SOCKET` | Redis socket | | server |
-| `REDIS_HOSTNAME` | Redis host | `redis` | server |
-| `REDIS_PORT` | Redis port | `6379` | server |
-| `REDIS_USERNAME` | Redis username | | server |
-| `REDIS_PASSWORD` | Redis password | | server |
-| `REDIS_DBINDEX` | Redis DB index | `0` | server |
-
-:::info
-All `REDIS_` variables must be provided to all Immich workers, including `api` and `microservices`.
-
-`REDIS_URL` must start with `ioredis://` and then include a `base64` encoded JSON string for the configuration.
-More information can be found in the upstream [ioredis] documentation.
-
-When `REDIS_URL` or `REDIS_SOCKET` are defined, the `REDIS_HOSTNAME`, `REDIS_PORT`, `REDIS_USERNAME`, `REDIS_PASSWORD`, and `REDIS_DBINDEX` variables are ignored.
-:::
-
-Redis (Sentinel) URL example JSON before encoding:
-
-<details>
-<summary>JSON</summary>
-
-```json
-{
-  "sentinels": [
-    {
-      "host": "redis-sentinel-node-0",
-      "port": 26379
-    },
-    {
-      "host": "redis-sentinel-node-1",
-      "port": 26379
-    },
-    {
-      "host": "redis-sentinel-node-2",
-      "port": 26379
-    }
-  ],
-  "name": "redis-sentinel"
-}
-```
-
-</details>
-
 ## Machine Learning

 | Variable | Description | Default | Containers |
@@ -212,16 +164,10 @@ the `_FILE` variable should be set to the path of a file containing the variable
 | `DB_USERNAME` | `DB_USERNAME_FILE`<sup>\*1</sup> |
 | `DB_PASSWORD` | `DB_PASSWORD_FILE`<sup>\*1</sup> |
 | `DB_URL` | `DB_URL_FILE`<sup>\*1</sup> |
-| `REDIS_PASSWORD` | `REDIS_PASSWORD_FILE`<sup>\*2</sup> |

 \*1: See the [official documentation][docker-secrets-docs] for
 details on how to use Docker Secrets in the Postgres image.

-\*2: See [this comment][docker-secrets-example] for an example of how
-to use a Docker secret for the password in the Redis container.
-
 [tz-list]: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
-[docker-secrets-example]: https://github.com/docker-library/redis/issues/46#issuecomment-335326234
 [docker-secrets-docs]: https://github.com/docker-library/docs/tree/master/postgres#docker-secrets
 [docker-secrets]: https://docs.docker.com/engine/swarm/secrets/
-[ioredis]: https://ioredis.readthedocs.io/en/latest/README/#connect-to-redis

@@ -107,8 +107,6 @@ Accept the default option or select the **Machine Learning Image Type** for your
 Immich's default is `postgres` but you should consider setting the **Database Password** to a custom value using only the characters `A-Za-z0-9`.

-The **Redis Password** should be set to a custom value using only the characters `A-Za-z0-9`.
-
 Accept the **Log Level** default of **Log**.

 Leave **Hugging Face Endpoint** blank. (This is for downloading ML models from a different source.)
@@ -242,7 +240,7 @@ className="border rounded-xl"
 :::info
 Some Environment Variables are not available for the TrueNAS SCALE app. This is mainly because they can be configured through GUI options in the [Edit Immich screen](#edit-app-settings).
-Some examples are: `IMMICH_VERSION`, `UPLOAD_LOCATION`, `DB_DATA_LOCATION`, `TZ`, `IMMICH_LOG_LEVEL`, `DB_PASSWORD`, `REDIS_PASSWORD`.
+Some examples are: `IMMICH_VERSION`, `UPLOAD_LOCATION`, `DB_DATA_LOCATION`, `TZ`, `IMMICH_LOG_LEVEL`, `DB_PASSWORD`.
 :::

 ## Updating the App

@@ -17,9 +17,9 @@ Immich can easily be installed and updated on Unraid via:
 :::

-In order to install Immich from the Unraid CA, you will need an existing Redis and PostgreSQL 14 container. If you do not already have Redis or PostgreSQL, you can install them from the Unraid CA; just make sure you choose PostgreSQL **14**.
+In order to install Immich from the Unraid CA, you will need an existing PostgreSQL 14 container. If you do not already have PostgreSQL, you can install it from the Unraid CA; just make sure you choose PostgreSQL **14**.

-Once you have Redis and PostgreSQL running, search for Immich on the Unraid CA, choose either of the templates listed and fill out the example variables.
+Once you have PostgreSQL running, search for Immich on the Unraid CA, choose either of the templates listed and fill out the example variables.

 For more information about setting up the community image see [here](https://github.com/imagegenius/docker-immich#application-setup)

@@ -28,14 +28,10 @@ services:
     extra_hosts:
       - 'auth-server:host-gateway'
     depends_on:
-      - redis
       - database
     ports:
      - 2285:2285

-  redis:
-    image: redis:6.2-alpine@sha256:3211c33a618c457e5d241922c975dbc4f446d0bdb2dc75694f5573ef8e2d01fa
-
   database:
     image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
     command: -c fsync=off -c shared_preload_libraries=vectors.so

@@ -78,7 +78,7 @@ describe('/jobs', () => {
     }

     await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
-      command: JobCommand.Empty,
+      command: JobCommand.Clear,
       force: false,
     });
@@ -160,7 +160,7 @@ describe('/jobs', () => {
     expect(assetBefore.thumbhash).toBeNull();

     await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
-      command: JobCommand.Empty,
+      command: JobCommand.Clear,
       force: false,
     });

@@ -59,7 +59,7 @@ show_friendly_message() {
   Successfully deployed Immich!
   You can access the website or the mobile app at http://$ip_address:2283
   ---------------------------------------------------
-  If you want to configure custom information of the server, including the database, Redis information, or the backup (or upload) location, etc.
+  If you want to configure custom information of the server, including the database, or the backup (or upload) location, etc.

   1. First bring down the containers with the command 'docker compose down' in the immich-app directory,

@@ -26,7 +26,7 @@ class JobCommand {
   static const start = JobCommand._(r'start');
   static const pause = JobCommand._(r'pause');
   static const resume = JobCommand._(r'resume');
-  static const empty = JobCommand._(r'empty');
+  static const clear = JobCommand._(r'clear');
   static const clearFailed = JobCommand._(r'clear-failed');

   /// List of all possible values in this [enum][JobCommand].
@@ -34,7 +34,7 @@ class JobCommand {
     start,
     pause,
     resume,
-    empty,
+    clear,
     clearFailed,
   ];
@@ -77,7 +77,7 @@ class JobCommandTypeTransformer {
       case r'start': return JobCommand.start;
       case r'pause': return JobCommand.pause;
       case r'resume': return JobCommand.resume;
-      case r'empty': return JobCommand.empty;
+      case r'clear': return JobCommand.clear;
       case r'clear-failed': return JobCommand.clearFailed;
       default:
         if (!allowNull) {

@@ -14,54 +14,42 @@ class JobCountsDto {
   /// Returns a new [JobCountsDto] instance.
   JobCountsDto({
     required this.active,
-    required this.completed,
     required this.delayed,
     required this.failed,
-    required this.paused,
     required this.waiting,
   });

   int active;

-  int completed;
-
   int delayed;

   int failed;

-  int paused;
-
   int waiting;

   @override
   bool operator ==(Object other) => identical(this, other) || other is JobCountsDto &&
     other.active == active &&
-    other.completed == completed &&
     other.delayed == delayed &&
     other.failed == failed &&
-    other.paused == paused &&
     other.waiting == waiting;

   @override
   int get hashCode =>
     // ignore: unnecessary_parenthesis
     (active.hashCode) +
-    (completed.hashCode) +
     (delayed.hashCode) +
     (failed.hashCode) +
-    (paused.hashCode) +
     (waiting.hashCode);

   @override
-  String toString() => 'JobCountsDto[active=$active, completed=$completed, delayed=$delayed, failed=$failed, paused=$paused, waiting=$waiting]';
+  String toString() => 'JobCountsDto[active=$active, delayed=$delayed, failed=$failed, waiting=$waiting]';

   Map<String, dynamic> toJson() {
     final json = <String, dynamic>{};
     json[r'active'] = this.active;
-    json[r'completed'] = this.completed;
     json[r'delayed'] = this.delayed;
     json[r'failed'] = this.failed;
-    json[r'paused'] = this.paused;
     json[r'waiting'] = this.waiting;
     return json;
   }
@@ -76,10 +64,8 @@ class JobCountsDto {
       return JobCountsDto(
         active: mapValueOfType<int>(json, r'active')!,
-        completed: mapValueOfType<int>(json, r'completed')!,
         delayed: mapValueOfType<int>(json, r'delayed')!,
         failed: mapValueOfType<int>(json, r'failed')!,
-        paused: mapValueOfType<int>(json, r'paused')!,
         waiting: mapValueOfType<int>(json, r'waiting')!,
       );
     }
@@ -129,10 +115,8 @@ class JobCountsDto {
   /// The list of required keys that must be present in a JSON.
   static const requiredKeys = <String>{
     'active',
-    'completed',
     'delayed',
     'failed',
-    'paused',
     'waiting',
   };
 }

@@ -13,32 +13,26 @@ part of openapi.api;
 class QueueStatusDto {
   /// Returns a new [QueueStatusDto] instance.
   QueueStatusDto({
-    required this.isActive,
-    required this.isPaused,
+    required this.paused,
   });

-  bool isActive;
-
-  bool isPaused;
+  bool paused;

   @override
   bool operator ==(Object other) => identical(this, other) || other is QueueStatusDto &&
-    other.isActive == isActive &&
-    other.isPaused == isPaused;
+    other.paused == paused;

   @override
   int get hashCode =>
     // ignore: unnecessary_parenthesis
-    (isActive.hashCode) +
-    (isPaused.hashCode);
+    (paused.hashCode);

   @override
-  String toString() => 'QueueStatusDto[isActive=$isActive, isPaused=$isPaused]';
+  String toString() => 'QueueStatusDto[paused=$paused]';

   Map<String, dynamic> toJson() {
     final json = <String, dynamic>{};
-    json[r'isActive'] = this.isActive;
-    json[r'isPaused'] = this.isPaused;
+    json[r'paused'] = this.paused;
     return json;
   }
@@ -51,8 +45,7 @@ class QueueStatusDto {
     final json = value.cast<String, dynamic>();
     return QueueStatusDto(
-      isActive: mapValueOfType<bool>(json, r'isActive')!,
-      isPaused: mapValueOfType<bool>(json, r'isPaused')!,
+      paused: mapValueOfType<bool>(json, r'paused')!,
     );
   }
   return null;
@@ -100,8 +93,7 @@ class QueueStatusDto {
   /// The list of required keys that must be present in a JSON.
   static const requiredKeys = <String>{
-    'isActive',
-    'isPaused',
+    'paused',
   };
 }

@@ -9621,7 +9621,7 @@
         "start",
         "pause",
         "resume",
-        "empty",
+        "clear",
         "clear-failed"
       ],
       "type": "string"
@@ -9649,28 +9649,20 @@
         "active": {
           "type": "integer"
         },
-        "completed": {
-          "type": "integer"
-        },
         "delayed": {
           "type": "integer"
         },
         "failed": {
           "type": "integer"
         },
-        "paused": {
-          "type": "integer"
-        },
         "waiting": {
           "type": "integer"
         }
       },
       "required": [
         "active",
-        "completed",
         "delayed",
         "failed",
-        "paused",
         "waiting"
       ],
       "type": "object"
@@ -11007,16 +10999,12 @@
     },
     "QueueStatusDto": {
       "properties": {
-        "isActive": {
-          "type": "boolean"
-        },
-        "isPaused": {
+        "paused": {
           "type": "boolean"
         }
       },
       "required": [
-        "isActive",
-        "isPaused"
+        "paused"
       ],
       "type": "object"
     },

@@ -577,15 +577,12 @@ export type FaceDto = {
 };

 export type JobCountsDto = {
   active: number;
-  completed: number;
   delayed: number;
   failed: number;
-  paused: number;
   waiting: number;
 };

 export type QueueStatusDto = {
-  isActive: boolean;
-  isPaused: boolean;
+  paused: boolean;
 };

 export type JobStatusDto = {
   jobCounts: JobCountsDto;
@@ -3673,7 +3670,7 @@ export enum JobCommand {
   Start = "start",
   Pause = "pause",
   Resume = "resume",
-  Empty = "empty",
+  Clear = "clear",
   ClearFailed = "clear-failed"
 }
 export enum MemoryType {

server/package-lock.json (generated)

@@ -10,7 +10,6 @@
       "hasInstallScript": true,
       "license": "GNU Affero General Public License version 3",
       "dependencies": {
-        "@nestjs/bullmq": "^11.0.1",
         "@nestjs/common": "^11.0.4",
         "@nestjs/core": "^11.0.4",
         "@nestjs/event-emitter": "^3.0.0",
@@ -24,11 +23,11 @@
         "@opentelemetry/exporter-prometheus": "^0.200.0",
         "@opentelemetry/sdk-node": "^0.200.0",
         "@react-email/components": "^0.0.36",
-        "@socket.io/redis-adapter": "^8.3.0",
+        "@socket.io/postgres-adapter": "^0.4.0",
+        "@types/pg": "^8.11.14",
         "archiver": "^7.0.0",
         "async-lock": "^1.4.0",
         "bcrypt": "^5.1.1",
-        "bullmq": "^4.8.0",
         "chokidar": "^3.5.3",
         "class-transformer": "^0.5.1",
         "class-validator": "^0.14.0",
@@ -39,9 +38,9 @@
         "fast-glob": "^3.3.2",
         "fluent-ffmpeg": "^2.1.2",
         "geo-tz": "^8.0.0",
+        "graphile-worker": "^0.16.6",
         "handlebars": "^4.7.8",
         "i18n-iso-countries": "^7.6.0",
-        "ioredis": "^5.3.2",
         "joi": "^17.10.0",
         "js-yaml": "^4.1.0",
         "kysely": "^0.28.0",
@@ -54,7 +53,7 @@
         "nestjs-otel": "^6.0.0",
         "nodemailer": "^6.9.13",
         "openid-client": "^6.3.3",
-        "pg": "^8.11.3",
+        "pg": "^8.15.6",
         "picomatch": "^4.0.2",
         "react": "^19.0.0",
         "react-dom": "^19.0.0",
@@ -80,7 +79,6 @@
         "@nestjs/testing": "^11.0.4",
         "@swc/core": "^1.4.14",
         "@testcontainers/postgresql": "^10.2.1",
-        "@testcontainers/redis": "^10.18.0",
         "@types/archiver": "^6.0.0",
         "@types/async-lock": "^1.4.2",
         "@types/bcrypt": "^5.0.0",
@@ -1072,6 +1070,12 @@
         "@nestjs/core": "^10.x || ^11.0.0"
       }
     },
+    "node_modules/@graphile/logger": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/@graphile/logger/-/logger-0.2.0.tgz",
+      "integrity": "sha512-jjcWBokl9eb1gVJ85QmoaQ73CQ52xAaOCF29ukRbYNl6lY+ts0ErTaDYOBlejcbUs2OpaiqYLO5uDhyLFzWw4w==",
+      "license": "MIT"
+    },
     "node_modules/@grpc/grpc-js": {
       "version": "1.13.3",
       "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.13.3.tgz",
@@ -1883,7 +1887,9 @@
       "version": "1.2.0",
       "resolved": "https://registry.npmjs.org/@ioredis/commands/-/commands-1.2.0.tgz",
       "integrity": "sha512-Sx1pU8EM64o2BrqNpEO1CNLtKQwyhuXuqyfH7oGKCk+1a33d2r5saW8zNwm3j6BTExtjrv2BxTgzzkMwts6vGg==",
-      "license": "MIT"
+      "license": "MIT",
+      "optional": true,
+      "peer": true
     },
     "node_modules/@isaacs/cliui": {
       "version": "8.0.2",
@@ -2118,45 +2124,13 @@
       "integrity": "sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==",
       "license": "MIT"
     },
-    "node_modules/@msgpackr-extract/msgpackr-extract-linux-x64": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-x64/-/msgpackr-extract-linux-x64-3.0.3.tgz",
-      "integrity": "sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==",
-      "cpu": [
-        "x64"
-      ],
-      "license": "MIT",
-      "optional": true,
-      "os": [
-        "linux"
-      ]
-    },
-    "node_modules/@nestjs/bull-shared": {
-      "version": "11.0.2",
-      "resolved": "https://registry.npmjs.org/@nestjs/bull-shared/-/bull-shared-11.0.2.tgz",
-      "integrity": "sha512-dFlttJvBqIFD6M8JVFbkrR4Feb39OTAJPJpFVILU50NOJCM4qziRw3dSNG84Q3v+7/M6xUGMFdZRRGvBBKxoSA==",
-      "license": "MIT",
-      "dependencies": {
-        "tslib": "2.8.1"
-      },
-      "peerDependencies": {
-        "@nestjs/common": "^10.0.0 || ^11.0.0",
-        "@nestjs/core": "^10.0.0 || ^11.0.0"
-      }
-    },
-    "node_modules/@nestjs/bullmq": {
-      "version": "11.0.2",
-      "resolved": "https://registry.npmjs.org/@nestjs/bullmq/-/bullmq-11.0.2.tgz",
-      "integrity": "sha512-Lq6lGpKkETsm0RDcUktlzsthFoE3A5QTMp2FwPi1eztKqKD6/90KS1TcnC9CJFzjpUaYnQzIMrlNs55e+/wsHA==",
-      "license": "MIT",
-      "dependencies": {
-        "@nestjs/bull-shared": "^11.0.2",
-        "tslib": "2.8.1"
-      },
-      "peerDependencies": {
-        "@nestjs/common": "^10.0.0 || ^11.0.0",
-        "@nestjs/core": "^10.0.0 || ^11.0.0",
-        "bullmq": "^3.0.0 || ^4.0.0 || ^5.0.0"
-      }
-    },
+    "node_modules/@msgpack/msgpack": {
+      "version": "2.8.0",
+      "resolved": "https://registry.npmjs.org/@msgpack/msgpack/-/msgpack-2.8.0.tgz",
+      "integrity": "sha512-h9u4u/jiIRKbq25PM+zymTyW6bhTzELvOoUd+AvYriWOAKpLGnIamaET3pnHYoI5iYphAHBI4ayx0MehR+VVPQ==",
+      "license": "ISC",
+      "engines": {
+        "node": ">= 10"
+      }
+    },
     "node_modules/@nestjs/cli": {
@@ -3787,6 +3761,17 @@
         "@opentelemetry/api": "^1.3.0"
       }
     },
+    "node_modules/@opentelemetry/instrumentation-pg/node_modules/@types/pg": {
+      "version": "8.6.1",
+      "resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.6.1.tgz",
+      "integrity": "sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/node": "*",
+        "pg-protocol": "*",
+        "pg-types": "^2.2.0"
+      }
+    },
     "node_modules/@opentelemetry/instrumentation-pino": {
       "version": "0.47.0",
       "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-pino/-/instrumentation-pino-0.47.0.tgz",
@@ -4763,24 +4748,25 @@
       "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==",
       "license": "MIT"
     },
-    "node_modules/@socket.io/redis-adapter": {
-      "version": "8.3.0",
-      "resolved": "https://registry.npmjs.org/@socket.io/redis-adapter/-/redis-adapter-8.3.0.tgz",
-      "integrity": "sha512-ly0cra+48hDmChxmIpnESKrc94LjRL80TEmZVscuQ/WWkRP81nNj8W8cCGMqbI4L6NCuAaPRSzZF1a9GlAxxnA==",
+    "node_modules/@socket.io/postgres-adapter": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/@socket.io/postgres-adapter/-/postgres-adapter-0.4.0.tgz",
+      "integrity": "sha512-FJQslCIchoT4oMHk0D8HeSi9nhAOE8/snId65zI10ykZsk3MQJnUH45+Jqd75IuQhtxxwrvNxqHmzLJEPw9PnA==",
       "license": "MIT",
       "dependencies": {
-        "debug": "~4.3.1",
-        "notepack.io": "~3.0.1",
-        "uid2": "1.0.0"
+        "@msgpack/msgpack": "~2.8.0",
+        "@types/pg": "^8.6.6",
+        "debug": "~4.3.4",
+        "pg": "^8.9.0"
       },
       "engines": {
-        "node": ">=10.0.0"
+        "node": ">=12.0.0"
       },
       "peerDependencies": {
         "socket.io-adapter": "^2.5.4"
       }
     },
-    "node_modules/@socket.io/redis-adapter/node_modules/debug": {
+    "node_modules/@socket.io/postgres-adapter/node_modules/debug": {
       "version": "4.3.7",
       "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
       "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
@@ -4914,16 +4900,6 @@
         "testcontainers": "^10.24.2"
       }
     },
-    "node_modules/@testcontainers/redis": {
-      "version": "10.24.2",
-      "resolved": "https://registry.npmjs.org/@testcontainers/redis/-/redis-10.24.2.tgz",
-      "integrity": "sha512-m4/FZW5ltZPaK9pQTKNipjpBk73Vdj7Ql3sFr26A9dOr0wJyM3Wnc9jeHTNRal7RDnY5rvumXAIUWbBlvKMJEw==",
-      "dev": true,
-      "license": "MIT",
-      "dependencies": {
-        "testcontainers": "^10.24.2"
-      }
-    },
     "node_modules/@tokenizer/inflate": {
       "version": "0.2.7",
       "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz",
@@ -5089,6 +5065,15 @@
         "@types/node": "*"
       }
     },
+    "node_modules/@types/debug": {
+      "version": "4.1.12",
+      "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
+      "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/ms": "*"
+      }
+    },
     "node_modules/@types/docker-modem": {
       "version": "3.0.6",
       "resolved": "https://registry.npmjs.org/@types/docker-modem/-/docker-modem-3.0.6.tgz",
@@ -5201,6 +5186,15 @@
         "rxjs": "^7.2.0"
       }
     },
+    "node_modules/@types/interpret": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/@types/interpret/-/interpret-1.1.3.tgz",
+      "integrity": "sha512-uBaBhj/BhilG58r64mtDb/BEdH51HIQLgP5bmWzc5qCtFMja8dCk/IOJmk36j0lbi9QHwI6sbtUNGuqXdKCAtQ==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/node": "*"
+      }
+    },
     "node_modules/@types/js-yaml": {
       "version": "4.0.9",
       "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz",
@@ -5261,6 +5255,12 @@
         "@types/node": "*"
       }
     },
+    "node_modules/@types/ms": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
+      "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
+      "license": "MIT"
+    },
     "node_modules/@types/multer": {
       "version": "1.4.12",
       "resolved": "https://registry.npmjs.org/@types/multer/-/multer-1.4.12.tgz",
@@ -5317,14 +5317,14 @@
       "license": "MIT"
     },
     "node_modules/@types/pg": {
-      "version": "8.6.1",
-      "resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.6.1.tgz",
-      "integrity": "sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==",
+      "version": "8.11.14",
+      "resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.11.14.tgz",
+      "integrity": "sha512-qyD11E5R3u0eJmd1lB0WnWKXJGA7s015nyARWljfz5DcX83TKAIlY+QrmvzQTsbIe+hkiFtkyL2gHC6qwF6Fbg==",
       "license": "MIT",
       "dependencies": {
         "@types/node": "*",
         "pg-protocol": "*",
-        "pg-types": "^2.2.0"
+        "pg-types": "^4.0.1"
       }
     },
     "node_modules/@types/pg-pool": {
@@ -5336,6 +5336,63 @@
         "@types/pg": "*"
       }
     },
+    "node_modules/@types/pg/node_modules/pg-types": {
+      "version": "4.0.2",
+      "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-4.0.2.tgz",
+      "integrity": "sha512-cRL3JpS3lKMGsKaWndugWQoLOCoP+Cic8oseVcbr0qhPzYD5DWXK+RZ9LY9wxRf7RQia4SCwQlXk0q6FCPrVng==",
+      "license": "MIT",
+      "dependencies": {
+        "pg-int8": "1.0.1",
+        "pg-numeric": "1.0.2",
+        "postgres-array": "~3.0.1",
+        "postgres-bytea": "~3.0.0",
+        "postgres-date": "~2.1.0",
+        "postgres-interval": "^3.0.0",
+        "postgres-range": "^1.1.1"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/@types/pg/node_modules/postgres-array": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-3.0.4.tgz",
+      "integrity": "sha512-nAUSGfSDGOaOAEGwqsRY27GPOea7CNipJPOA7lPbdEpx5Kg3qzdP0AaWC5MlhTWV9s4hFX39nomVZ+C4tnGOJQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/@types/pg/node_modules/postgres-bytea": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-3.0.0.tgz",
+      "integrity": "sha512-CNd4jim9RFPkObHSjVHlVrxoVQXz7quwNFpz7RY1okNNme49+sVyiTvTRobiLV548Hx/hb1BG+iE7h9493WzFw==",
+      "license": "MIT",
+      "dependencies": {
+        "obuf": "~1.1.2"
+      },
+      "engines": {
+        "node": ">= 6"
+      }
+    },
+    "node_modules/@types/pg/node_modules/postgres-date": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-2.1.0.tgz",
+      "integrity": "sha512-K7Juri8gtgXVcDfZttFKVmhglp7epKb1K4pgrkLxehjqkrgPhfG6OO8LHLkfaqkbpjNRnra018XwAr1yQFWGcA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/@types/pg/node_modules/postgres-interval": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-3.0.0.tgz",
+      "integrity": "sha512-BSNDnbyZCXSxgA+1f5UU2GmwhoI0aU5yMxRGO8CdFEcY2BQF9xm/7MqKnYoM1nJDk8nONNWDk9WeSmePFhQdlw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=12"
+      }
+    },
     "node_modules/@types/picomatch": {
       "version": "3.0.2",
       "resolved": "https://registry.npmjs.org/@types/picomatch/-/picomatch-3.0.2.tgz",
@@ -5401,7 +5458,6 @@
       "version": "7.7.0",
       "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.7.0.tgz",
       "integrity": "sha512-k107IF4+Xr7UHjwDc7Cfd6PRQfbdkiRabXGRjo07b4WyPahFBZCZ1sE+BNxYIJPPg73UkfOsVOLwqVc/6ETrIA==",
-      "dev": true,
       "license": "MIT"
     },
     "node_modules/@types/send": {
@@ -6885,64 +6941,6 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
-    "node_modules/bullmq": {
-      "version": "4.18.2",
-      "resolved": "https://registry.npmjs.org/bullmq/-/bullmq-4.18.2.tgz",
-      "integrity": "sha512-Cx0O98IlGiFw7UBa+zwGz+nH0Pcl1wfTvMVBlsMna3s0219hXroVovh1xPRgomyUcbyciHiugGCkW0RRNZDHYQ==",
-      "license": "MIT",
-      "dependencies": {
-        "cron-parser": "^4.6.0",
-        "glob": "^8.0.3",
-        "ioredis": "^5.3.2",
-        "lodash": "^4.17.21",
-        "msgpackr": "^1.6.2",
-        "node-abort-controller": "^3.1.1",
-        "semver": "^7.5.4",
-        "tslib": "^2.0.0",
-        "uuid": "^9.0.0"
-      }
-    },
-    "node_modules/bullmq/node_modules/brace-expansion": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
-      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
-      "license": "MIT",
-      "dependencies": {
-        "balanced-match": "^1.0.0"
-      }
-    },
-    "node_modules/bullmq/node_modules/glob": {
-      "version": "8.1.0",
-      "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
-      "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
-      "deprecated": "Glob versions prior to v9 are no longer supported",
-      "license": "ISC",
-      "dependencies": {
-        "fs.realpath": "^1.0.0",
-        "inflight": "^1.0.4",
-        "inherits": "2",
-        "minimatch": "^5.0.1",
-        "once": "^1.3.0"
-      },
-      "engines": {
-        "node": ">=12"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/isaacs"
-      }
-    },
-    "node_modules/bullmq/node_modules/minimatch": {
-      "version": "5.1.6",
-      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz",
-      "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==",
-      "license": "ISC",
-      "dependencies": {
-        "brace-expansion": "^2.0.1"
-      },
-      "engines": {
-        "node": ">=10"
-      }
-    },
     "node_modules/busboy": {
       "version": "1.6.0",
       "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
@@ -7530,6 +7528,8 @@
       "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz",
       "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==",
       "license": "Apache-2.0",
+      "optional": true,
+      "peer": true,
       "engines": {
         "node": ">=0.10.0"
       }
@@ -7932,18 +7932,6 @@
         "luxon": "~3.5.0"
       }
     },
-    "node_modules/cron-parser": {
-      "version": "4.9.0",
-      "resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.9.0.tgz",
-      "integrity": "sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==",
-      "license": "MIT",
-      "dependencies": {
-        "luxon": "^3.2.1"
-      },
-      "engines": {
-        "node": ">=12.0.0"
-      }
-    },
     "node_modules/cron/node_modules/luxon": {
       "version": "3.5.0",
       "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.5.0.tgz",
@@ -8170,6 +8158,8 @@
       "resolved": "https://registry.npmjs.org/denque/-/denque-2.1.0.tgz",
       "integrity": "sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==",
       "license": "Apache-2.0",
+      "optional": true,
+      "peer": true,
       "engines": {
         "node": ">=0.10"
       }
@@ -10059,6 +10049,64 @@
       "dev": true,
       "license": "MIT"
     },
+    "node_modules/graphile-config": {
+      "version": "0.0.1-beta.15",
+      "resolved": "https://registry.npmjs.org/graphile-config/-/graphile-config-0.0.1-beta.15.tgz",
+      "integrity": "sha512-J+hYqhZlx5yY7XdU7XjOAqNCAUZU33fEx3PdkNc1cfAAbo1TNMWiib4DFH5XkT8BagJtTyFrMnDCuKxnphCu+g==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/interpret": "^1.1.1",
+        "@types/node": "^20.5.7",
+        "@types/semver": "^7.5.1",
+        "chalk": "^4.1.2",
+        "debug": "^4.3.4",
+        "interpret": "^3.1.1",
+        "semver": "^7.5.4",
+        "tslib": "^2.6.2",
+        "yargs": "^17.7.2"
+      },
+      "engines": {
+        "node": ">=16"
+      }
+    },
+    "node_modules/graphile-config/node_modules/@types/node": {
+      "version": "20.17.32",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.32.tgz",
+      "integrity": "sha512-zeMXFn8zQ+UkjK4ws0RiOC9EWByyW1CcVmLe+2rQocXRsGEDxUCwPEIVgpsGcLHS/P8JkT0oa3839BRABS0oPw==",
+      "license": "MIT",
+      "dependencies": {
+        "undici-types": "~6.19.2"
+      }
+    },
+    "node_modules/graphile-config/node_modules/undici-types": {
+      "version": "6.19.8",
+      "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz",
+      "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==",
+      "license": "MIT"
+    },
+    "node_modules/graphile-worker": {
+      "version": "0.16.6",
+      "resolved": "https://registry.npmjs.org/graphile-worker/-/graphile-worker-0.16.6.tgz",
+      "integrity": "sha512-e7gGYDmGqzju2l83MpzX8vNG/lOtVJiSzI3eZpAFubSxh/cxs7sRrRGBGjzBP1kNG0H+c95etPpNRNlH65PYhw==",
+      "license": "MIT",
+      "dependencies": {
+        "@graphile/logger": "^0.2.0",
+        "@types/debug": "^4.1.10",
+        "@types/pg": "^8.10.5",
+        "cosmiconfig": "^8.3.6",
+        "graphile-config": "^0.0.1-beta.4",
+        "json5": "^2.2.3",
+        "pg": "^8.11.3",
+        "tslib": "^2.6.2",
+        "yargs": "^17.7.2"
+      },
+      "bin": {
+        "graphile-worker": "dist/cli.js"
+      },
+      "engines": {
+        "node": ">=14.0.0"
+      }
+    },
     "node_modules/handlebars": {
       "version": "4.7.8",
       "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
@@ -10501,11 +10549,22 @@
         "node": ">=8"
       }
     },
+    "node_modules/interpret": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/interpret/-/interpret-3.1.1.tgz",
+      "integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=10.13.0"
+      }
+    },
     "node_modules/ioredis": {
       "version": "5.6.1",
       "resolved": "https://registry.npmjs.org/ioredis/-/ioredis-5.6.1.tgz",
       "integrity": "sha512-UxC0Yv1Y4WRJiGQxQkP0hfdL0/5/6YvdfOOClRgJ0qppSarkhneSa6UvkMkms0AkdGimSH3Ikqm+6mkMmX7vGA==",
       "license": "MIT",
+      "optional": true,
+      "peer": true,
       "dependencies": {
         "@ioredis/commands": "^1.1.1",
         "cluster-key-slot": "^1.1.0",
@@ -11375,13 +11434,17 @@
       "version": "4.2.0",
       "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz",
       "integrity": "sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==",
-      "license": "MIT"
+      "license": "MIT",
+      "optional": true,
+      "peer": true
     },
     "node_modules/lodash.isarguments": {
       "version": "3.1.0",
       "resolved": "https://registry.npmjs.org/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz",
       "integrity": "sha512-chi4NHZlZqZD18a0imDHnZPrDeBbTtVN7GXMwuGdRH9qotxAjYs3aVLKc7zNOG9eddR5Ksd8rvFEBc9SsggPpg==",
-      "license": "MIT"
+      "license": "MIT",
+      "optional": true,
+      "peer": true
     },
     "node_modules/lodash.merge": {
       "version": "4.6.2",
@@ -11908,37 +11971,6 @@
       "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
       "license": "MIT"
     },
-    "node_modules/msgpackr": {
-      "version": "1.11.2",
-      "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.11.2.tgz",
-      "integrity": "sha512-F9UngXRlPyWCDEASDpTf6c9uNhGPTqnTeLVt7bN+bU1eajoR/8V9ys2BRaV5C/e5ihE6sJ9uPIKaYt6bFuO32g==",
-      "license": "MIT",
-      "optionalDependencies": {
-        "msgpackr-extract": "^3.0.2"
-      }
-    },
-    "node_modules/msgpackr-extract": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/msgpackr-extract/-/msgpackr-extract-3.0.3.tgz",
-      "integrity": "sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==",
-      "hasInstallScript": true,
-      "license": "MIT",
-      "optional": true,
-      "dependencies": {
-        "node-gyp-build-optional-packages": "5.2.2"
-      },
-      "bin": {
-        "download-msgpackr-prebuilds": "bin/download-prebuilds.js"
-      },
-      "optionalDependencies": {
-        "@msgpackr-extract/msgpackr-extract-darwin-arm64": "3.0.3",
-        "@msgpackr-extract/msgpackr-extract-darwin-x64": "3.0.3",
-        "@msgpackr-extract/msgpackr-extract-linux-arm": "3.0.3",
-        "@msgpackr-extract/msgpackr-extract-linux-arm64": "3.0.3",
-        "@msgpackr-extract/msgpackr-extract-linux-x64": "3.0.3",
-        "@msgpackr-extract/msgpackr-extract-win32-x64": "3.0.3"
-      }
-    },
     "node_modules/multer": {
       "version": "1.4.5-lts.2",
       "resolved": "https://registry.npmjs.org/multer/-/multer-1.4.5-lts.2.tgz",
@@ -12299,6 +12331,7 @@
       "version": "3.1.1",
       "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz",
       "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==",
+      "dev": true,
      "license": "MIT"
    },
     "node_modules/node-addon-api": {
@@ -12366,21 +12399,6 @@
         "node": "^18.17.0 || >=20.5.0"
       }
     },
-    "node_modules/node-gyp-build-optional-packages": {
-      "version": "5.2.2",
-      "resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-5.2.2.tgz",
-      "integrity": "sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==",
-      "license": "MIT",
-      "optional": true,
-      "dependencies": {
-        "detect-libc": "^2.0.1"
-      },
-      "bin": {
-        "node-gyp-build-optional-packages": "bin.js",
-        "node-gyp-build-optional-packages-optional": "optional.js",
-        "node-gyp-build-optional-packages-test": "build-test.js"
-      }
-    },
     "node_modules/node-gyp/node_modules/abbrev": {
       "version": "3.0.1",
       "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz",
@@ -12554,12 +12572,6 @@
         "node": ">=0.10.0"
       }
     },
-    "node_modules/notepack.io": {
-      "version": "3.0.1",
-      "resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-3.0.1.tgz",
-      "integrity": "sha512-TKC/8zH5pXIAMVQio2TvVDTtPRX+DJPHDqjRbxogtFiByHyzKmy96RA0JtCQJ+WouyyL4A10xomQzgbUT+1jCg==",
-      "license": "MIT"
-    },
     "node_modules/npmlog": {
       "version": "5.0.1",
       "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-5.0.1.tgz",
@@ -12629,6 +12641,12 @@
         "node": ">= 0.4"
       }
     },
+    "node_modules/obuf": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
+      "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==",
+      "license": "MIT"
+    },
     "node_modules/on-finished": {
       "version": "2.4.1",
       "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
@@ -13151,13 +13169,13 @@
       }
     },
     "node_modules/pg": {
-      "version": "8.15.5",
-      "resolved": "https://registry.npmjs.org/pg/-/pg-8.15.5.tgz",
-      "integrity": "sha512-EpAhHFQc+aH9VfeffWIVC+XXk6lmAhS9W1FxtxcPXs94yxhrI1I6w/zkWfIOII/OkBv3Be04X3xMOj0kQ78l6w==",
+      "version": "8.15.6",
+      "resolved": "https://registry.npmjs.org/pg/-/pg-8.15.6.tgz",
+      "integrity": "sha512-yvao7YI3GdmmrslNVsZgx9PfntfWrnXwtR+K/DjI0I/sTKif4Z623um+sjVZ1hk5670B+ODjvHDAckKdjmPTsg==",
       "license": "MIT",
       "dependencies": {
         "pg-connection-string": "^2.8.5",
-        "pg-pool": "^3.9.5",
+        "pg-pool": "^3.9.6",
         "pg-protocol": "^1.9.5",
         "pg-types": "^2.1.0",
         "pgpass": "1.x"
"node": ">=4.0.0" "node": ">=4.0.0"
} }
}, },
"node_modules/pg-numeric": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/pg-numeric/-/pg-numeric-1.0.2.tgz",
"integrity": "sha512-BM/Thnrw5jm2kKLE5uJkXqqExRUY/toLHda65XgFTBTFYZyopbKjBe29Ii3RbkvlsMoFwD+tHeGaCjjv0gHlyw==",
"license": "ISC",
"engines": {
"node": ">=4"
}
},
"node_modules/pg-pool": { "node_modules/pg-pool": {
"version": "3.9.6", "version": "3.9.6",
"resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.9.6.tgz", "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.9.6.tgz",
@@ -13508,6 +13535,12 @@
         "node": ">=0.10.0"
       }
     },
+    "node_modules/postgres-range": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/postgres-range/-/postgres-range-1.1.4.tgz",
+      "integrity": "sha512-i/hbxIE9803Alj/6ytL7UHQxRvZkI9O4Sy+J3HGc4F4oo/2eQAjTSNJ0bfxyse3bH0nuVesCk+3IRLaMtG3H6w==",
+      "license": "MIT"
+    },
     "node_modules/prelude-ls": {
       "version": "1.2.1",
       "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
@ -14219,6 +14252,8 @@
"resolved": "https://registry.npmjs.org/redis-errors/-/redis-errors-1.2.0.tgz", "resolved": "https://registry.npmjs.org/redis-errors/-/redis-errors-1.2.0.tgz",
"integrity": "sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w==", "integrity": "sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w==",
"license": "MIT", "license": "MIT",
"optional": true,
"peer": true,
"engines": { "engines": {
"node": ">=4" "node": ">=4"
} }
@ -14228,6 +14263,8 @@
"resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-3.0.0.tgz", "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-3.0.0.tgz",
"integrity": "sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A==", "integrity": "sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A==",
"license": "MIT", "license": "MIT",
"optional": true,
"peer": true,
"dependencies": { "dependencies": {
"redis-errors": "^1.0.0" "redis-errors": "^1.0.0"
}, },
@ -15346,7 +15383,9 @@
"version": "2.1.0", "version": "2.1.0",
"resolved": "https://registry.npmjs.org/standard-as-callback/-/standard-as-callback-2.1.0.tgz", "resolved": "https://registry.npmjs.org/standard-as-callback/-/standard-as-callback-2.1.0.tgz",
"integrity": "sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A==", "integrity": "sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A==",
"license": "MIT" "license": "MIT",
"optional": true,
"peer": true
}, },
"node_modules/statuses": { "node_modules/statuses": {
"version": "2.0.1", "version": "2.0.1",
@ -16965,15 +17004,6 @@
"node": ">=8" "node": ">=8"
} }
}, },
"node_modules/uid2": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/uid2/-/uid2-1.0.0.tgz",
"integrity": "sha512-+I6aJUv63YAcY9n4mQreLUt0d4lvwkkopDNmpomkAUz0fAkEMV9pRWxN0EjhW1YfRhcuyHg2v3mwddCDW1+LFQ==",
"license": "MIT",
"engines": {
"node": ">= 4.0.0"
}
},
"node_modules/uint8array-extras": { "node_modules/uint8array-extras": {
"version": "1.4.0", "version": "1.4.0",
"resolved": "https://registry.npmjs.org/uint8array-extras/-/uint8array-extras-1.4.0.tgz", "resolved": "https://registry.npmjs.org/uint8array-extras/-/uint8array-extras-1.4.0.tgz",


@ -35,7 +35,6 @@
"postinstall": "patch-package" "postinstall": "patch-package"
}, },
"dependencies": { "dependencies": {
"@nestjs/bullmq": "^11.0.1",
"@nestjs/common": "^11.0.4", "@nestjs/common": "^11.0.4",
"@nestjs/core": "^11.0.4", "@nestjs/core": "^11.0.4",
"@nestjs/event-emitter": "^3.0.0", "@nestjs/event-emitter": "^3.0.0",
@ -49,11 +48,11 @@
"@opentelemetry/exporter-prometheus": "^0.200.0", "@opentelemetry/exporter-prometheus": "^0.200.0",
"@opentelemetry/sdk-node": "^0.200.0", "@opentelemetry/sdk-node": "^0.200.0",
"@react-email/components": "^0.0.36", "@react-email/components": "^0.0.36",
"@socket.io/redis-adapter": "^8.3.0", "@socket.io/postgres-adapter": "^0.4.0",
"@types/pg": "^8.11.14",
"archiver": "^7.0.0", "archiver": "^7.0.0",
"async-lock": "^1.4.0", "async-lock": "^1.4.0",
"bcrypt": "^5.1.1", "bcrypt": "^5.1.1",
"bullmq": "^4.8.0",
"chokidar": "^3.5.3", "chokidar": "^3.5.3",
"class-transformer": "^0.5.1", "class-transformer": "^0.5.1",
"class-validator": "^0.14.0", "class-validator": "^0.14.0",
@ -64,9 +63,9 @@
"fast-glob": "^3.3.2", "fast-glob": "^3.3.2",
"fluent-ffmpeg": "^2.1.2", "fluent-ffmpeg": "^2.1.2",
"geo-tz": "^8.0.0", "geo-tz": "^8.0.0",
"graphile-worker": "^0.16.6",
"handlebars": "^4.7.8", "handlebars": "^4.7.8",
"i18n-iso-countries": "^7.6.0", "i18n-iso-countries": "^7.6.0",
"ioredis": "^5.3.2",
"joi": "^17.10.0", "joi": "^17.10.0",
"js-yaml": "^4.1.0", "js-yaml": "^4.1.0",
"kysely": "^0.28.0", "kysely": "^0.28.0",
@ -79,7 +78,7 @@
"nestjs-otel": "^6.0.0", "nestjs-otel": "^6.0.0",
"nodemailer": "^6.9.13", "nodemailer": "^6.9.13",
"openid-client": "^6.3.3", "openid-client": "^6.3.3",
"pg": "^8.11.3", "pg": "^8.15.6",
"picomatch": "^4.0.2", "picomatch": "^4.0.2",
"react": "^19.0.0", "react": "^19.0.0",
"react-dom": "^19.0.0", "react-dom": "^19.0.0",
@ -105,7 +104,6 @@
"@nestjs/testing": "^11.0.4", "@nestjs/testing": "^11.0.4",
"@swc/core": "^1.4.14", "@swc/core": "^1.4.14",
"@testcontainers/postgresql": "^10.2.1", "@testcontainers/postgresql": "^10.2.1",
"@testcontainers/redis": "^10.18.0",
"@types/archiver": "^6.0.0", "@types/archiver": "^6.0.0",
"@types/async-lock": "^1.4.2", "@types/async-lock": "^1.4.2",
"@types/bcrypt": "^5.0.0", "@types/bcrypt": "^5.0.0",


@ -1,4 +1,3 @@
import { BullModule } from '@nestjs/bullmq';
import { Inject, Module, OnModuleDestroy, OnModuleInit, ValidationPipe } from '@nestjs/common'; import { Inject, Module, OnModuleDestroy, OnModuleInit, ValidationPipe } from '@nestjs/common';
import { APP_FILTER, APP_GUARD, APP_INTERCEPTOR, APP_PIPE } from '@nestjs/core'; import { APP_FILTER, APP_GUARD, APP_INTERCEPTOR, APP_PIPE } from '@nestjs/core';
import { ScheduleModule, SchedulerRegistry } from '@nestjs/schedule'; import { ScheduleModule, SchedulerRegistry } from '@nestjs/schedule';
@ -37,11 +36,9 @@ export const middleware = [
]; ];
const configRepository = new ConfigRepository(); const configRepository = new ConfigRepository();
const { bull, cls, database, otel } = configRepository.getEnv(); const { cls, database, otel } = configRepository.getEnv();
const imports = [ const imports = [
BullModule.forRoot(bull.config),
BullModule.registerQueue(...bull.queues),
ClsModule.forRoot(cls.config), ClsModule.forRoot(cls.config),
OpenTelemetryModule.forRoot(otel), OpenTelemetryModule.forRoot(otel),
KyselyModule.forRoot(getKyselyConfig(database.config)), KyselyModule.forRoot(getKyselyConfig(database.config)),

server/src/db.d.ts vendored

@ -236,6 +236,30 @@ export interface GeodataPlaces {
name: string; name: string;
} }
export interface GraphileWorkerJobs {
id: Generated<string>;
task_identifier: string;
locked_at: Timestamp | null;
locked_by: string | null;
run_at: Timestamp | null;
attempts: number;
max_attempts: number;
}
export interface GraphileWorkerPrivateJobs {
id: Generated<string>;
task_id: string;
locked_at: Timestamp | null;
locked_by: string | null;
attempts: number;
max_attempts: number;
}
export interface GraphileWorkerPrivateTasks {
id: Generated<string>;
identifier: string;
}
export interface Libraries { export interface Libraries {
createdAt: Generated<Timestamp>; createdAt: Generated<Timestamp>;
deletedAt: Timestamp | null; deletedAt: Timestamp | null;
@ -476,6 +500,9 @@ export interface DB {
exif: Exif; exif: Exif;
face_search: FaceSearch; face_search: FaceSearch;
geodata_places: GeodataPlaces; geodata_places: GeodataPlaces;
'graphile_worker.jobs': GraphileWorkerJobs;
'graphile_worker._private_jobs': GraphileWorkerPrivateJobs;
'graphile_worker._private_tasks': GraphileWorkerPrivateTasks;
libraries: Libraries; libraries: Libraries;
memories: Memories; memories: Memories;
memories_assets_assets: MemoriesAssetsAssets; memories_assets_assets: MemoriesAssetsAssets;
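
Note: these typings mirror graphile-worker's own schema (the public graphile_worker.jobs view plus the private jobs/tasks tables), so queue state can be inspected through kysely like any other table. A hedged sketch, assuming a Kysely<DB> instance named db and an illustrative queue name:

// List jobs that have exhausted their retries for one queue.
const failedJobs = await db
  .selectFrom('graphile_worker.jobs')
  .select(['id', 'task_identifier', 'attempts', 'max_attempts'])
  .where('task_identifier', '=', 'thumbnailGeneration') // illustrative queue name
  .whereRef('attempts', '>=', 'max_attempts')
  .execute();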


@ -157,34 +157,4 @@ export class EnvDto {
@IsString() @IsString()
@Optional() @Optional()
NO_COLOR?: string; NO_COLOR?: string;
@IsString()
@Optional()
REDIS_HOSTNAME?: string;
@IsInt()
@Optional()
@Type(() => Number)
REDIS_PORT?: number;
@IsInt()
@Optional()
@Type(() => Number)
REDIS_DBINDEX?: number;
@IsString()
@Optional()
REDIS_USERNAME?: string;
@IsString()
@Optional()
REDIS_PASSWORD?: string;
@IsString()
@Optional()
REDIS_SOCKET?: string;
@IsString()
@Optional()
REDIS_URL?: string;
} }


@ -30,20 +30,15 @@ export class JobCountsDto {
@ApiProperty({ type: 'integer' }) @ApiProperty({ type: 'integer' })
active!: number; active!: number;
@ApiProperty({ type: 'integer' }) @ApiProperty({ type: 'integer' })
completed!: number; waiting!: number;
@ApiProperty({ type: 'integer' })
failed!: number;
@ApiProperty({ type: 'integer' }) @ApiProperty({ type: 'integer' })
delayed!: number; delayed!: number;
@ApiProperty({ type: 'integer' }) @ApiProperty({ type: 'integer' })
waiting!: number; failed!: number;
@ApiProperty({ type: 'integer' })
paused!: number;
} }
export class QueueStatusDto { export class QueueStatusDto {
isActive!: boolean; paused!: boolean;
isPaused!: boolean;
} }
export class JobStatusDto { export class JobStatusDto {


@ -204,6 +204,7 @@ export enum SystemMetadataKey {
SYSTEM_FLAGS = 'system-flags', SYSTEM_FLAGS = 'system-flags',
VERSION_CHECK_STATE = 'version-check-state', VERSION_CHECK_STATE = 'version-check-state',
LICENSE = 'license', LICENSE = 'license',
QUEUES_STATE = 'queues-state',
} }
export enum UserMetadataKey { export enum UserMetadataKey {
@ -533,10 +534,20 @@ export enum JobName {
} }
export enum JobCommand { export enum JobCommand {
// The behavior of start depends on the queue. Usually it is a request to
// reprocess everything associated with the queue from scratch.
START = 'start', START = 'start',
// Pause prevents workers from processing jobs.
PAUSE = 'pause', PAUSE = 'pause',
// Resume allows workers to continue processing jobs.
RESUME = 'resume', RESUME = 'resume',
EMPTY = 'empty',
// Clear removes all pending jobs.
CLEAR = 'clear',
// ClearFailed removes all failed jobs.
CLEAR_FAILED = 'clear-failed', CLEAR_FAILED = 'clear-failed',
} }
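
A note on the rename: EMPTY becomes CLEAR, and CLEAR_FAILED gains an explicit meaning. A hedged example of issuing a command from a client; the route shape and queue id follow the existing jobs API, but treat both, and the apiKey variable, as assumptions:

await fetch('/api/jobs/metadataExtraction', {
  method: 'PUT',
  headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey }, // apiKey: hypothetical
  body: JSON.stringify({ command: 'clear', force: false }),
});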


@ -1,9 +1,10 @@
import { INestApplicationContext } from '@nestjs/common'; import { INestApplicationContext } from '@nestjs/common';
import { IoAdapter } from '@nestjs/platform-socket.io'; import { IoAdapter } from '@nestjs/platform-socket.io';
import { createAdapter } from '@socket.io/redis-adapter'; import { createAdapter } from '@socket.io/postgres-adapter';
import { Redis } from 'ioredis'; import pg, { PoolConfig } from 'pg';
import { ServerOptions } from 'socket.io'; import { ServerOptions } from 'socket.io';
import { ConfigRepository } from 'src/repositories/config.repository'; import { ConfigRepository } from 'src/repositories/config.repository';
import { asPostgresConnectionConfig } from 'src/utils/database';
export class WebSocketAdapter extends IoAdapter { export class WebSocketAdapter extends IoAdapter {
constructor(private app: INestApplicationContext) { constructor(private app: INestApplicationContext) {
@ -11,11 +12,11 @@ export class WebSocketAdapter extends IoAdapter {
} }
createIOServer(port: number, options?: ServerOptions): any { createIOServer(port: number, options?: ServerOptions): any {
const { redis } = this.app.get(ConfigRepository).getEnv();
const server = super.createIOServer(port, options); const server = super.createIOServer(port, options);
const pubClient = new Redis(redis); const configRepository = new ConfigRepository();
const subClient = pubClient.duplicate(); const { database } = configRepository.getEnv();
server.adapter(createAdapter(pubClient, subClient)); const pool = new pg.Pool(asPostgresConnectionConfig(database.config) as PoolConfig);
server.adapter(createAdapter(pool));
return server; return server;
} }
} }
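
Worth flagging for multi-instance deployments: per the @socket.io/postgres-adapter README, the adapter coordinates servers over NOTIFY/LISTEN and stores oversized payloads in an attachments table that the application is expected to create up front. A hedged sketch (DDL as documented by the adapter; connection details illustrative):

import pg from 'pg';

const pool = new pg.Pool({ connectionString: process.env.DB_URL }); // illustrative source
await pool.query(`
  CREATE TABLE IF NOT EXISTS socket_io_attachments (
    id          bigserial UNIQUE,
    created_at  timestamptz DEFAULT NOW(),
    payload     bytea
  );
`);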


@ -26,38 +26,12 @@ const resetEnv = () => {
'DB_SKIP_MIGRATIONS', 'DB_SKIP_MIGRATIONS',
'DB_VECTOR_EXTENSION', 'DB_VECTOR_EXTENSION',
'REDIS_HOSTNAME',
'REDIS_PORT',
'REDIS_DBINDEX',
'REDIS_USERNAME',
'REDIS_PASSWORD',
'REDIS_SOCKET',
'REDIS_URL',
'NO_COLOR', 'NO_COLOR',
]) { ]) {
delete process.env[env]; delete process.env[env];
} }
}; };
const sentinelConfig = {
sentinels: [
{
host: 'redis-sentinel-node-0',
port: 26_379,
},
{
host: 'redis-sentinel-node-1',
port: 26_379,
},
{
host: 'redis-sentinel-node-2',
port: 26_379,
},
],
name: 'redis-sentinel',
};
describe('getEnv', () => { describe('getEnv', () => {
beforeEach(() => { beforeEach(() => {
resetEnv(); resetEnv();
@ -108,34 +82,6 @@ describe('getEnv', () => {
}); });
}); });
describe('redis', () => {
it('should use defaults', () => {
const { redis } = getEnv();
expect(redis).toEqual({
host: 'redis',
port: 6379,
db: 0,
username: undefined,
password: undefined,
path: undefined,
});
});
it('should parse base64 encoded config, ignore other env', () => {
process.env.REDIS_URL = `ioredis://${Buffer.from(JSON.stringify(sentinelConfig)).toString('base64')}`;
process.env.REDIS_HOSTNAME = 'redis-host';
process.env.REDIS_USERNAME = 'redis-user';
process.env.REDIS_PASSWORD = 'redis-password';
const { redis } = getEnv();
expect(redis).toEqual(sentinelConfig);
});
it('should reject invalid json', () => {
process.env.REDIS_URL = `ioredis://${Buffer.from('{ "invalid json"').toString('base64')}`;
expect(() => getEnv()).toThrowError('Failed to decode redis options');
});
});
describe('noColor', () => { describe('noColor', () => {
beforeEach(() => { beforeEach(() => {
delete process.env.NO_COLOR; delete process.env.NO_COLOR;


@ -1,25 +1,14 @@
import { RegisterQueueOptions } from '@nestjs/bullmq';
import { Inject, Injectable, Optional } from '@nestjs/common'; import { Inject, Injectable, Optional } from '@nestjs/common';
import { QueueOptions } from 'bullmq';
import { plainToInstance } from 'class-transformer'; import { plainToInstance } from 'class-transformer';
import { validateSync } from 'class-validator'; import { validateSync } from 'class-validator';
import { Request, Response } from 'express'; import { Request, Response } from 'express';
import { RedisOptions } from 'ioredis';
import { CLS_ID, ClsModuleOptions } from 'nestjs-cls'; import { CLS_ID, ClsModuleOptions } from 'nestjs-cls';
import { OpenTelemetryModuleOptions } from 'nestjs-otel/lib/interfaces'; import { OpenTelemetryModuleOptions } from 'nestjs-otel/lib/interfaces';
import { join } from 'node:path'; import { join, resolve } from 'node:path';
import { citiesFile, excludePaths, IWorker } from 'src/constants'; import { citiesFile, excludePaths, IWorker } from 'src/constants';
import { Telemetry } from 'src/decorators'; import { Telemetry } from 'src/decorators';
import { EnvDto } from 'src/dtos/env.dto'; import { EnvDto } from 'src/dtos/env.dto';
import { import { DatabaseExtension, ImmichEnvironment, ImmichHeader, ImmichTelemetry, ImmichWorker, LogLevel } from 'src/enum';
DatabaseExtension,
ImmichEnvironment,
ImmichHeader,
ImmichTelemetry,
ImmichWorker,
LogLevel,
QueueName,
} from 'src/enum';
import { DatabaseConnectionParams, VectorExtension } from 'src/types'; import { DatabaseConnectionParams, VectorExtension } from 'src/types';
import { setDifference } from 'src/utils/set'; import { setDifference } from 'src/utils/set';
@ -46,11 +35,6 @@ export interface EnvData {
thirdPartySupportUrl?: string; thirdPartySupportUrl?: string;
}; };
bull: {
config: QueueOptions;
queues: RegisterQueueOptions[];
};
cls: { cls: {
config: ClsModuleOptions; config: ClsModuleOptions;
}; };
@ -87,8 +71,6 @@ export interface EnvData {
}; };
}; };
redis: RedisOptions;
telemetry: { telemetry: {
apiPort: number; apiPort: number;
microservicesPort: number; microservicesPort: number;
@ -149,28 +131,12 @@ const getEnv = (): EnvData => {
const isProd = environment === ImmichEnvironment.PRODUCTION; const isProd = environment === ImmichEnvironment.PRODUCTION;
const buildFolder = dto.IMMICH_BUILD_DATA || '/build'; const buildFolder = dto.IMMICH_BUILD_DATA || '/build';
const folders = { const folders = {
// eslint-disable-next-line unicorn/prefer-module
dist: resolve(`${__dirname}/..`),
geodata: join(buildFolder, 'geodata'), geodata: join(buildFolder, 'geodata'),
web: join(buildFolder, 'www'), web: join(buildFolder, 'www'),
}; };
let redisConfig = {
host: dto.REDIS_HOSTNAME || 'redis',
port: dto.REDIS_PORT || 6379,
db: dto.REDIS_DBINDEX || 0,
username: dto.REDIS_USERNAME || undefined,
password: dto.REDIS_PASSWORD || undefined,
path: dto.REDIS_SOCKET || undefined,
};
const redisUrl = dto.REDIS_URL;
if (redisUrl && redisUrl.startsWith('ioredis://')) {
try {
redisConfig = JSON.parse(Buffer.from(redisUrl.slice(10), 'base64').toString());
} catch (error) {
throw new Error(`Failed to decode redis options: ${error}`);
}
}
const includedTelemetries = const includedTelemetries =
dto.IMMICH_TELEMETRY_INCLUDE === 'all' dto.IMMICH_TELEMETRY_INCLUDE === 'all'
? new Set(Object.values(ImmichTelemetry)) ? new Set(Object.values(ImmichTelemetry))
@ -218,19 +184,6 @@ const getEnv = (): EnvData => {
thirdPartySupportUrl: dto.IMMICH_THIRD_PARTY_SUPPORT_URL, thirdPartySupportUrl: dto.IMMICH_THIRD_PARTY_SUPPORT_URL,
}, },
bull: {
config: {
prefix: 'immich_bull',
connection: { ...redisConfig },
defaultJobOptions: {
attempts: 3,
removeOnComplete: true,
removeOnFail: false,
},
},
queues: Object.values(QueueName).map((name) => ({ name })),
},
cls: { cls: {
config: { config: {
middleware: { middleware: {
@ -269,8 +222,6 @@ const getEnv = (): EnvData => {
}, },
}, },
redis: redisConfig,
resourcePaths: { resourcePaths: {
lockFile: join(buildFolder, 'build-lock.json'), lockFile: join(buildFolder, 'build-lock.json'),
geodata: { geodata: {


@ -64,6 +64,9 @@ type EventMap = {
'assets.delete': [{ assetIds: string[]; userId: string }]; 'assets.delete': [{ assetIds: string[]; userId: string }];
'assets.restore': [{ assetIds: string[]; userId: string }]; 'assets.restore': [{ assetIds: string[]; userId: string }];
'queue.pause': [QueueName];
'queue.resume': [QueueName];
'job.start': [QueueName, JobItem]; 'job.start': [QueueName, JobItem];
'job.failed': [{ job: JobItem; error: Error | any }]; 'job.failed': [{ job: JobItem; error: Error | any }];
@ -85,7 +88,7 @@ type EventMap = {
'websocket.connect': [{ userId: string }]; 'websocket.connect': [{ userId: string }];
}; };
export const serverEvents = ['config.update'] as const; export const serverEvents = ['config.update', 'queue.pause', 'queue.resume'] as const;
export type ServerEvents = (typeof serverEvents)[number]; export type ServerEvents = (typeof serverEvents)[number];
export type EmitEvent = keyof EventMap; export type EmitEvent = keyof EventMap;
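
Registering 'queue.pause' and 'queue.resume' as server events is deliberate: the API instance that receives the HTTP request is generally not the instance that owns the queue runners, so the event has to fan out across instances. A minimal sketch of the producing side (the consuming @OnEvent handlers appear in job.service.ts below):

// From an API-side handler; the microservices worker reacts via @OnEvent.
this.eventRepository.serverSend('queue.pause', QueueName.METADATA_EXTRACTION);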


@ -1,15 +1,20 @@
import { getQueueToken } from '@nestjs/bullmq';
import { Injectable } from '@nestjs/common'; import { Injectable } from '@nestjs/common';
import { ModuleRef, Reflector } from '@nestjs/core'; import { ModuleRef, Reflector } from '@nestjs/core';
import { JobsOptions, Queue, Worker } from 'bullmq';
import { ClassConstructor } from 'class-transformer'; import { ClassConstructor } from 'class-transformer';
import { setTimeout } from 'node:timers/promises'; import { makeWorkerUtils, run, Runner, TaskSpec, WorkerUtils } from 'graphile-worker';
import { JobConfig } from 'src/decorators'; import { Kysely } from 'kysely';
import { JobName, JobStatus, MetadataKey, QueueCleanType, QueueName } from 'src/enum'; import { DateTime, Duration } from 'luxon';
import { InjectKysely } from 'nestjs-kysely';
import pg, { PoolConfig } from 'pg';
import { DB } from 'src/db';
import { GenerateSql, JobConfig } from 'src/decorators';
import { JobName, JobStatus, MetadataKey, QueueName, SystemMetadataKey } from 'src/enum';
import { ConfigRepository } from 'src/repositories/config.repository'; import { ConfigRepository } from 'src/repositories/config.repository';
import { EventRepository } from 'src/repositories/event.repository'; import { EventRepository } from 'src/repositories/event.repository';
import { LoggingRepository } from 'src/repositories/logging.repository'; import { LoggingRepository } from 'src/repositories/logging.repository';
import { IEntityJob, JobCounts, JobItem, JobOf, QueueStatus } from 'src/types'; import { SystemMetadataRepository } from 'src/repositories/system-metadata.repository';
import { JobCounts, JobItem, JobOf, QueueStatus } from 'src/types';
import { asPostgresConnectionConfig } from 'src/utils/database';
import { getKeyByValue, getMethodNames, ImmichStartupError } from 'src/utils/misc'; import { getKeyByValue, getMethodNames, ImmichStartupError } from 'src/utils/misc';
type JobMapItem = { type JobMapItem = {
@ -19,26 +24,38 @@ type JobMapItem = {
label: string; label: string;
}; };
type QueueConfiguration = {
paused: boolean;
concurrency: number;
};
@Injectable() @Injectable()
export class JobRepository { export class JobRepository {
private workers: Partial<Record<QueueName, Worker>> = {};
private handlers: Partial<Record<JobName, JobMapItem>> = {}; private handlers: Partial<Record<JobName, JobMapItem>> = {};
// todo inject the pg pool
private pool?: pg.Pool;
// todo inject worker utils?
private workerUtils?: WorkerUtils;
private queueConfig: Record<string, QueueConfiguration> = {};
private runners: Record<string, Runner> = {};
constructor( constructor(
private moduleRef: ModuleRef, @InjectKysely() private db: Kysely<DB>,
private configRepository: ConfigRepository,
private eventRepository: EventRepository,
private logger: LoggingRepository, private logger: LoggingRepository,
private moduleRef: ModuleRef,
private eventRepository: EventRepository,
private configRepository: ConfigRepository,
private systemMetadataRepository: SystemMetadataRepository,
) { ) {
this.logger.setContext(JobRepository.name); logger.setContext(JobRepository.name);
} }
setup(services: ClassConstructor<unknown>[]) { async setup(services: ClassConstructor<unknown>[]) {
const reflector = this.moduleRef.get(Reflector, { strict: false }); const reflector = this.moduleRef.get(Reflector, { strict: false });
// discovery for (const service of services) {
for (const Service of services) { const instance = this.moduleRef.get<any>(service);
const instance = this.moduleRef.get<any>(Service);
for (const methodName of getMethodNames(instance)) { for (const methodName of getMethodNames(instance)) {
const handler = instance[methodName]; const handler = instance[methodName];
const config = reflector.get<JobConfig>(MetadataKey.JOB_CONFIG, handler); const config = reflector.get<JobConfig>(MetadataKey.JOB_CONFIG, handler);
@ -47,7 +64,7 @@ export class JobRepository {
} }
const { name: jobName, queue: queueName } = config; const { name: jobName, queue: queueName } = config;
const label = `${Service.name}.${handler.name}`; const label = `${service.name}.${handler.name}`;
// one handler per job // one handler per job
if (this.handlers[jobName]) { if (this.handlers[jobName]) {
@ -70,176 +87,216 @@ export class JobRepository {
} }
} }
// no missing handlers const { database } = this.configRepository.getEnv();
for (const [jobKey, jobName] of Object.entries(JobName)) { const pool = new pg.Pool({
const item = this.handlers[jobName]; ...asPostgresConnectionConfig(database.config),
if (!item) { max: 100,
const errorMessage = `Failed to find job handler for Job.${jobKey} ("${jobName}")`; } as PoolConfig);
this.logger.error(
`${errorMessage}. Make sure to add the @OnJob({ name: JobName.${jobKey}, queue: QueueName.XYZ }) decorator for the new job.`, // todo: remove debug info
); setInterval(() => {
throw new ImmichStartupError(errorMessage); this.logger.log(`connections:
total: ${pool.totalCount}
idle: ${pool.idleCount}
waiting: ${pool.waitingCount}`);
}, 5000);
pool.setMaxListeners(100);
pool.on('connect', (client) => {
client.setMaxListeners(200);
});
this.pool = pool;
this.workerUtils = await makeWorkerUtils({ pgPool: pool });
} }
async start(queueName: QueueName, concurrency?: number): Promise<void> {
if (concurrency) {
this.queueConfig[queueName] = {
...this.queueConfig[queueName],
concurrency,
};
} else {
concurrency = this.queueConfig[queueName].concurrency;
}
if (this.queueConfig[queueName].paused) {
return;
}
await this.stop(queueName);
this.runners[queueName] = await run({
concurrency,
taskList: {
[queueName]: async (payload: unknown): Promise<void> => {
this.logger.log(`Job ${queueName} started with payload: ${JSON.stringify(payload)}`);
await this.eventRepository.emit('job.start', queueName, payload as JobItem);
},
},
pgPool: this.pool,
});
}
async stop(queueName: QueueName): Promise<void> {
const runner = this.runners[queueName];
if (runner) {
await runner.stop();
delete this.runners[queueName];
} }
} }
startWorkers() { async pause(queueName: QueueName): Promise<void> {
const { bull } = this.configRepository.getEnv(); await this.setState(queueName, true);
for (const queueName of Object.values(QueueName)) { await this.stop(queueName);
this.logger.debug(`Starting worker for queue: ${queueName}`);
this.workers[queueName] = new Worker(
queueName,
(job) => this.eventRepository.emit('job.start', queueName, job as JobItem),
{ ...bull.config, concurrency: 1 },
);
}
} }
async run({ name, data }: JobItem) { async resume(queueName: QueueName): Promise<void> {
await this.setState(queueName, false);
await this.start(queueName);
}
private async setState(queueName: QueueName, paused: boolean): Promise<void> {
const state = await this.systemMetadataRepository.get(SystemMetadataKey.QUEUES_STATE);
await this.systemMetadataRepository.set(SystemMetadataKey.QUEUES_STATE, {
...state,
[queueName]: { paused },
});
this.queueConfig[queueName] = {
...this.queueConfig[queueName],
paused,
};
}
// todo: we should consolidate queue and job names and have queues be
// homogeneous.
//
// the reason there are multiple kinds of jobs per queue is so that
// concurrency settings apply to all of them. We could instead create a
// concept of "queue" groups, such that workers will run for groups of queues
// rather than just a single queue and achieve the same outcome.
private getQueueName(name: JobName) {
return (this.handlers[name] as JobMapItem).queueName;
}
async run({ name, data }: JobItem): Promise<JobStatus> {
const item = this.handlers[name as JobName]; const item = this.handlers[name as JobName];
if (!item) { if (!item) {
this.logger.warn(`Skipping unknown job: "${name}"`); this.logger.warn(`Skipping unknown job: "${name}"`);
return JobStatus.SKIPPED; return JobStatus.SKIPPED;
} }
return item.handler(data); return item.handler(data);
} }
setConcurrency(queueName: QueueName, concurrency: number) { async queue(item: JobItem): Promise<void> {
const worker = this.workers[queueName]; await this.workerUtils!.addJob(this.getQueueName(item.name), item, this.getJobOptions(item));
if (!worker) {
this.logger.warn(`Unable to set queue concurrency, worker not found: '${queueName}'`);
return;
}
worker.concurrency = concurrency;
}
async getQueueStatus(name: QueueName): Promise<QueueStatus> {
const queue = this.getQueue(name);
return {
isActive: !!(await queue.getActiveCount()),
isPaused: await queue.isPaused(),
};
}
pause(name: QueueName) {
return this.getQueue(name).pause();
}
resume(name: QueueName) {
return this.getQueue(name).resume();
}
empty(name: QueueName) {
return this.getQueue(name).drain();
}
clear(name: QueueName, type: QueueCleanType) {
return this.getQueue(name).clean(0, 1000, type);
}
getJobCounts(name: QueueName): Promise<JobCounts> {
return this.getQueue(name).getJobCounts(
'active',
'completed',
'failed',
'delayed',
'waiting',
'paused',
) as unknown as Promise<JobCounts>;
}
private getQueueName(name: JobName) {
return (this.handlers[name] as JobMapItem).queueName;
} }
async queueAll(items: JobItem[]): Promise<void> { async queueAll(items: JobItem[]): Promise<void> {
if (items.length === 0) { await Promise.all(items.map((item) => this.queue(item)));
return;
} }
const promises = []; // todo: are we actually generating sql
const itemsByQueue = {} as Record<string, (JobItem & { data: any; options: JobsOptions | undefined })[]>; async clear(name: QueueName): Promise<void> {
for (const item of items) { await this.db
const queueName = this.getQueueName(item.name); .deleteFrom('graphile_worker._private_jobs')
const job = { .where(({ eb, selectFrom }) =>
name: item.name, eb('task_id', 'in', selectFrom('graphile_worker._private_tasks').select('id').where('identifier', '=', name)),
data: item.data || {}, )
options: this.getJobOptions(item) || undefined, .execute();
} as JobItem & { data: any; options: JobsOptions | undefined };
if (job.options?.jobId) { const workers = await this.db
// need to use add() instead of addBulk() for jobId deduplication .selectFrom('graphile_worker.jobs')
promises.push(this.getQueue(queueName).add(item.name, item.data, job.options)); .select('locked_by')
} else { .where('locked_by', 'is not', null)
itemsByQueue[queueName] = itemsByQueue[queueName] || []; .distinct()
itemsByQueue[queueName].push(job); .execute();
}
// Potentially dangerous? It does help when jobs get stuck in the active
// state, though. The documentation says stuck jobs are unlocked automatically
// after 4 hours, but it can be strange to click "clear" in the UI and see
// nothing happen, especially as the UI is binary: new jobs usually cannot be
// scheduled unless both active and waiting are zero.
await this.workerUtils!.forceUnlockWorkers(workers.map((worker) => worker.locked_by!));
} }
for (const [queueName, jobs] of Object.entries(itemsByQueue)) { async clearFailed(name: QueueName): Promise<void> {
const queue = this.getQueue(queueName as QueueName); await this.db
promises.push(queue.addBulk(jobs)); .deleteFrom('graphile_worker._private_jobs')
.where(({ eb, selectFrom }) =>
eb(
'task_id',
'in',
selectFrom('graphile_worker._private_tasks')
.select('id')
.where((eb) => eb.and([eb('identifier', '=', name), eb('attempts', '>=', eb.ref('max_attempts'))])),
),
)
.execute();
} }
await Promise.all(promises); // todo: are we actually generating sql
@GenerateSql({ params: [] })
async getJobCounts(name: QueueName): Promise<JobCounts> {
return await this.db
.selectFrom('graphile_worker.jobs')
.select((eb) => [
eb.fn
.countAll<number>()
.filterWhere((eb) => eb.and([eb('task_identifier', '=', name), eb('locked_by', 'is not', null)]))
.as('active'),
eb.fn
.countAll<number>()
.filterWhere((eb) =>
eb.and([
eb('task_identifier', '=', name),
eb('locked_by', 'is', null),
eb('run_at', '<=', eb.fn<Date>('now')),
]),
)
.as('waiting'),
eb.fn
.countAll<number>()
.filterWhere((eb) =>
eb.and([
eb('task_identifier', '=', name),
eb('locked_by', 'is', null),
eb('run_at', '>', eb.fn<Date>('now')),
]),
)
.as('delayed'),
eb.fn
.countAll<number>()
.filterWhere((eb) => eb.and([eb('task_identifier', '=', name), eb('attempts', '>=', eb.ref('max_attempts'))]))
.as('failed'),
])
.executeTakeFirstOrThrow();
} }
async queue(item: JobItem): Promise<void> { async getQueueStatus(queueName: QueueName): Promise<QueueStatus> {
return this.queueAll([item]); const state = await this.systemMetadataRepository.get(SystemMetadataKey.QUEUES_STATE);
return { paused: state?.[queueName]?.paused ?? false };
} }
async waitForQueueCompletion(...queues: QueueName[]): Promise<void> { private getJobOptions(item: JobItem): TaskSpec | undefined {
let activeQueue: QueueStatus | undefined;
do {
const statuses = await Promise.all(queues.map((name) => this.getQueueStatus(name)));
activeQueue = statuses.find((status) => status.isActive);
if (activeQueue) {
this.logger.verbose(`Waiting for ${activeQueue} queue to stop...`);
await setTimeout(1000);
}
} while (activeQueue);
}
private getJobOptions(item: JobItem): JobsOptions | null {
switch (item.name) { switch (item.name) {
case JobName.NOTIFY_ALBUM_UPDATE: { case JobName.NOTIFY_ALBUM_UPDATE: {
return { jobId: item.data.id, delay: item.data?.delay }; let runAt: Date | undefined;
if (item.data?.delay) {
runAt = DateTime.now().plus(Duration.fromMillis(item.data.delay)).toJSDate();
}
return { jobKey: item.data.id, runAt };
} }
case JobName.STORAGE_TEMPLATE_MIGRATION_SINGLE: { case JobName.STORAGE_TEMPLATE_MIGRATION_SINGLE: {
return { jobId: item.data.id }; return { jobKey: QueueName.STORAGE_TEMPLATE_MIGRATION };
} }
case JobName.GENERATE_PERSON_THUMBNAIL: { case JobName.GENERATE_PERSON_THUMBNAIL: {
return { priority: 1 }; return { priority: 1 };
} }
case JobName.QUEUE_FACIAL_RECOGNITION: { case JobName.QUEUE_FACIAL_RECOGNITION: {
return { jobId: JobName.QUEUE_FACIAL_RECOGNITION }; return { jobKey: JobName.QUEUE_FACIAL_RECOGNITION };
}
default: {
return null;
} }
} }
} }
private getQueue(queue: QueueName): Queue {
return this.moduleRef.get<Queue>(getQueueToken(queue), { strict: false });
}
public async removeJob(jobId: string, name: JobName): Promise<IEntityJob | undefined> {
const existingJob = await this.getQueue(this.getQueueName(name)).getJob(jobId);
if (!existingJob) {
return;
}
try {
await existingJob.remove();
} catch (error: any) {
if (error.message?.includes('Missing key for job')) {
return;
}
throw error;
}
return existingJob.data;
}
} }
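
For readers new to graphile-worker, the repository above composes two of its primitives: workerUtils.addJob(identifier, payload, spec), where a jobKey makes the insert idempotent (re-queueing with the same key replaces the pending job, standing in for bullmq's jobId deduplication), and run({ taskList, concurrency }), which starts a runner that picks up jobs via LISTEN/NOTIFY plus periodic polling. A self-contained, hedged sketch; the connection string and task name are illustrative:

import { makeWorkerUtils, run } from 'graphile-worker';

const connectionString = 'postgres://postgres:postgres@localhost:5432/immich'; // illustrative

const main = async () => {
  const workerUtils = await makeWorkerUtils({ connectionString });

  // Re-adding a job with the same jobKey replaces the pending one.
  await workerUtils.addJob('demoQueue', { id: '42' }, { jobKey: '42' });

  const runner = await run({
    connectionString,
    concurrency: 2,
    taskList: {
      demoQueue: async (payload) => {
        console.log('processing', payload);
      },
    },
  });

  // Stop the runner and release the utils pool when done.
  await runner.stop();
  await workerUtils.release();
};

main().catch(console.error);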


@ -4,7 +4,6 @@ import { MetricOptions } from '@opentelemetry/api';
import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks'; import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks';
import { PrometheusExporter } from '@opentelemetry/exporter-prometheus'; import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';
import { HttpInstrumentation } from '@opentelemetry/instrumentation-http'; import { HttpInstrumentation } from '@opentelemetry/instrumentation-http';
import { IORedisInstrumentation } from '@opentelemetry/instrumentation-ioredis';
import { NestInstrumentation } from '@opentelemetry/instrumentation-nestjs-core'; import { NestInstrumentation } from '@opentelemetry/instrumentation-nestjs-core';
import { PgInstrumentation } from '@opentelemetry/instrumentation-pg'; import { PgInstrumentation } from '@opentelemetry/instrumentation-pg';
import { resourceFromAttributes } from '@opentelemetry/resources'; import { resourceFromAttributes } from '@opentelemetry/resources';
@ -68,12 +67,7 @@ export const bootstrapTelemetry = (port: number) => {
}), }),
metricReader: new PrometheusExporter({ port }), metricReader: new PrometheusExporter({ port }),
contextManager: new AsyncLocalStorageContextManager(), contextManager: new AsyncLocalStorageContextManager(),
instrumentations: [ instrumentations: [new HttpInstrumentation(), new NestInstrumentation(), new PgInstrumentation()],
new HttpInstrumentation(),
new IORedisInstrumentation(),
new NestInstrumentation(),
new PgInstrumentation(),
],
views: [ views: [
{ {
instrumentName: '*', instrumentName: '*',


@ -2,7 +2,7 @@ import { BadRequestException } from '@nestjs/common';
import { defaults, SystemConfig } from 'src/config'; import { defaults, SystemConfig } from 'src/config';
import { ImmichWorker, JobCommand, JobName, JobStatus, QueueName } from 'src/enum'; import { ImmichWorker, JobCommand, JobName, JobStatus, QueueName } from 'src/enum';
import { JobService } from 'src/services/job.service'; import { JobService } from 'src/services/job.service';
import { JobItem } from 'src/types'; import { JobCounts, JobItem } from 'src/types';
import { assetStub } from 'test/fixtures/asset.stub'; import { assetStub } from 'test/fixtures/asset.stub';
import { newTestService, ServiceMocks } from 'test/utils'; import { newTestService, ServiceMocks } from 'test/utils';
@ -21,14 +21,14 @@ describe(JobService.name, () => {
}); });
describe('onConfigUpdate', () => { describe('onConfigUpdate', () => {
it('should update concurrency', () => { it('should update concurrency', async () => {
sut.onConfigUpdate({ newConfig: defaults, oldConfig: {} as SystemConfig }); await sut.onConfigUpdate({ newConfig: defaults, oldConfig: {} as SystemConfig });
expect(mocks.job.setConcurrency).toHaveBeenCalledTimes(15); expect(mocks.job.start).toHaveBeenCalledTimes(15);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(5, QueueName.FACIAL_RECOGNITION, 1); expect(mocks.job.start).toHaveBeenNthCalledWith(5, QueueName.FACIAL_RECOGNITION, 1);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(7, QueueName.DUPLICATE_DETECTION, 1); expect(mocks.job.start).toHaveBeenNthCalledWith(7, QueueName.DUPLICATE_DETECTION, 1);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(8, QueueName.BACKGROUND_TASK, 5); expect(mocks.job.start).toHaveBeenNthCalledWith(8, QueueName.BACKGROUND_TASK, 5);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(9, QueueName.STORAGE_TEMPLATE_MIGRATION, 1); expect(mocks.job.start).toHaveBeenNthCalledWith(9, QueueName.STORAGE_TEMPLATE_MIGRATION, 1);
}); });
}); });
@ -55,29 +55,20 @@ describe(JobService.name, () => {
it('should get all job statuses', async () => { it('should get all job statuses', async () => {
mocks.job.getJobCounts.mockResolvedValue({ mocks.job.getJobCounts.mockResolvedValue({
active: 1, active: 1,
completed: 1,
failed: 1,
delayed: 1,
waiting: 1, waiting: 1,
paused: 1, delayed: 1,
}); failed: 1,
mocks.job.getQueueStatus.mockResolvedValue({
isActive: true,
isPaused: true,
}); });
const expectedJobStatus = { const expectedJobStatus = {
jobCounts: { jobCounts: {
active: 1, active: 1,
completed: 1, waiting: 1,
delayed: 1, delayed: 1,
failed: 1, failed: 1,
waiting: 1,
paused: 1,
}, },
queueStatus: { queueStatus: {
isActive: true, paused: true,
isPaused: true,
}, },
}; };
@ -114,14 +105,20 @@ describe(JobService.name, () => {
expect(mocks.job.resume).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION); expect(mocks.job.resume).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION);
}); });
it('should handle an empty command', async () => { it('should handle a clear command', async () => {
await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.EMPTY, force: false }); await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.CLEAR, force: false });
expect(mocks.job.empty).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION); expect(mocks.job.clear).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION);
});
it('should handle a clear-failed command', async () => {
await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.CLEAR_FAILED, force: false });
expect(mocks.job.clearFailed).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION);
}); });
it('should not start a job that is already running', async () => { it('should not start a job that is already running', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: true, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 1 } as JobCounts);
await expect( await expect(
sut.handleCommand(QueueName.VIDEO_CONVERSION, { command: JobCommand.START, force: false }), sut.handleCommand(QueueName.VIDEO_CONVERSION, { command: JobCommand.START, force: false }),
@ -132,7 +129,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start video conversion command', async () => { it('should handle a start video conversion command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.VIDEO_CONVERSION, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.VIDEO_CONVERSION, { command: JobCommand.START, force: false });
@ -140,7 +137,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start storage template migration command', async () => { it('should handle a start storage template migration command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.STORAGE_TEMPLATE_MIGRATION, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.STORAGE_TEMPLATE_MIGRATION, { command: JobCommand.START, force: false });
@ -148,7 +145,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start smart search command', async () => { it('should handle a start smart search command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.SMART_SEARCH, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.SMART_SEARCH, { command: JobCommand.START, force: false });
@ -156,7 +153,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start metadata extraction command', async () => { it('should handle a start metadata extraction command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.START, force: false });
@ -164,7 +161,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start sidecar command', async () => { it('should handle a start sidecar command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.SIDECAR, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.SIDECAR, { command: JobCommand.START, force: false });
@ -172,7 +169,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start thumbnail generation command', async () => { it('should handle a start thumbnail generation command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.THUMBNAIL_GENERATION, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.THUMBNAIL_GENERATION, { command: JobCommand.START, force: false });
@ -180,7 +177,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start face detection command', async () => { it('should handle a start face detection command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.FACE_DETECTION, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.FACE_DETECTION, { command: JobCommand.START, force: false });
@ -188,7 +185,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start facial recognition command', async () => { it('should handle a start facial recognition command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.FACIAL_RECOGNITION, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.FACIAL_RECOGNITION, { command: JobCommand.START, force: false });
@ -196,7 +193,7 @@ describe(JobService.name, () => {
}); });
it('should handle a start backup database command', async () => { it('should handle a start backup database command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.BACKUP_DATABASE, { command: JobCommand.START, force: false }); await sut.handleCommand(QueueName.BACKUP_DATABASE, { command: JobCommand.START, force: false });
@ -204,7 +201,7 @@ describe(JobService.name, () => {
}); });
it('should throw a bad request when an invalid queue is used', async () => { it('should throw a bad request when an invalid queue is used', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false }); mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await expect( await expect(
sut.handleCommand(QueueName.BACKGROUND_TASK, { command: JobCommand.START, force: false }), sut.handleCommand(QueueName.BACKGROUND_TASK, { command: JobCommand.START, force: false }),


@ -12,7 +12,6 @@ import {
JobName, JobName,
JobStatus, JobStatus,
ManualJobName, ManualJobName,
QueueCleanType,
QueueName, QueueName,
} from 'src/enum'; } from 'src/enum';
import { ArgOf, ArgsOf } from 'src/repositories/event.repository'; import { ArgOf, ArgsOf } from 'src/repositories/event.repository';
@ -56,7 +55,7 @@ export class JobService extends BaseService {
private services: ClassConstructor<unknown>[] = []; private services: ClassConstructor<unknown>[] = [];
@OnEvent({ name: 'config.init', workers: [ImmichWorker.MICROSERVICES] }) @OnEvent({ name: 'config.init', workers: [ImmichWorker.MICROSERVICES] })
onConfigInit({ newConfig: config }: ArgOf<'config.init'>) { async onConfigInit({ newConfig: config }: ArgOf<'config.init'>) {
this.logger.debug(`Updating queue concurrency settings`); this.logger.debug(`Updating queue concurrency settings`);
for (const queueName of Object.values(QueueName)) { for (const queueName of Object.values(QueueName)) {
let concurrency = 1; let concurrency = 1;
@ -64,21 +63,18 @@ export class JobService extends BaseService {
concurrency = config.job[queueName].concurrency; concurrency = config.job[queueName].concurrency;
} }
this.logger.debug(`Setting ${queueName} concurrency to ${concurrency}`); this.logger.debug(`Setting ${queueName} concurrency to ${concurrency}`);
this.jobRepository.setConcurrency(queueName, concurrency); await this.jobRepository.start(queueName, concurrency);
} }
} }
@OnEvent({ name: 'config.update', server: true, workers: [ImmichWorker.MICROSERVICES] }) @OnEvent({ name: 'config.update', server: true, workers: [ImmichWorker.MICROSERVICES] })
onConfigUpdate({ newConfig: config }: ArgOf<'config.update'>) { async onConfigUpdate({ newConfig: config }: ArgOf<'config.update'>) {
this.onConfigInit({ newConfig: config }); await this.onConfigInit({ newConfig: config });
} }
@OnEvent({ name: 'app.bootstrap', priority: BootstrapEventPriority.JobService }) @OnEvent({ name: 'app.bootstrap', priority: BootstrapEventPriority.JobService })
onBootstrap() { async onBootstrap() {
this.jobRepository.setup(this.services); await this.jobRepository.setup(this.services);
if (this.worker === ImmichWorker.MICROSERVICES) {
this.jobRepository.startWorkers();
}
} }
setServices(services: ClassConstructor<unknown>[]) { setServices(services: ClassConstructor<unknown>[]) {
@ -97,25 +93,20 @@ export class JobService extends BaseService {
await this.start(queueName, dto); await this.start(queueName, dto);
break; break;
} }
case JobCommand.PAUSE: { case JobCommand.PAUSE: {
await this.jobRepository.pause(queueName); this.eventRepository.serverSend('queue.pause', queueName);
break; break;
} }
case JobCommand.RESUME: { case JobCommand.RESUME: {
await this.jobRepository.resume(queueName); this.eventRepository.serverSend('queue.resume', queueName);
break; break;
} }
case JobCommand.CLEAR: {
case JobCommand.EMPTY: { await this.jobRepository.clear(queueName);
await this.jobRepository.empty(queueName);
break; break;
} }
case JobCommand.CLEAR_FAILED: { case JobCommand.CLEAR_FAILED: {
const failedJobs = await this.jobRepository.clear(queueName, QueueCleanType.FAILED); await this.jobRepository.clearFailed(queueName);
this.logger.debug(`Cleared failed jobs: ${failedJobs}`);
break; break;
} }
} }
@ -141,9 +132,9 @@ export class JobService extends BaseService {
} }
private async start(name: QueueName, { force }: JobCommandDto): Promise<void> { private async start(name: QueueName, { force }: JobCommandDto): Promise<void> {
const { isActive } = await this.jobRepository.getQueueStatus(name); const { active } = await this.jobRepository.getJobCounts(name);
if (isActive) { if (active > 0) {
throw new BadRequestException(`Job is already running`); throw new BadRequestException(`Jobs are already running`);
} }
this.telemetryRepository.jobs.addToCounter(`immich.queues.${snakeCase(name)}.started`, 1); this.telemetryRepository.jobs.addToCounter(`immich.queues.${snakeCase(name)}.started`, 1);
@ -203,6 +194,16 @@ export class JobService extends BaseService {
} }
} }
@OnEvent({ name: 'queue.pause', server: true, workers: [ImmichWorker.MICROSERVICES] })
async pause(...[queueName]: ArgsOf<'queue.pause'>): Promise<void> {
await this.jobRepository.pause(queueName);
}
@OnEvent({ name: 'queue.resume', server: true, workers: [ImmichWorker.MICROSERVICES] })
async resume(...[queueName]: ArgsOf<'queue.resume'>): Promise<void> {
await this.jobRepository.resume(queueName);
}
@OnEvent({ name: 'job.start' }) @OnEvent({ name: 'job.start' })
async onJobStart(...[queueName, job]: ArgsOf<'job.start'>) { async onJobStart(...[queueName, job]: ArgsOf<'job.start'>) {
const queueMetric = `immich.queues.${snakeCase(queueName)}.active`; const queueMetric = `immich.queues.${snakeCase(queueName)}.active`;


@ -67,16 +67,12 @@ describe(MetadataService.name, () => {
}); });
describe('onBootstrapEvent', () => { describe('onBootstrapEvent', () => {
it('should pause and resume queue during init', async () => { it('should init', async () => {
mocks.job.pause.mockResolvedValue();
mocks.map.init.mockResolvedValue(); mocks.map.init.mockResolvedValue();
mocks.job.resume.mockResolvedValue();
await sut.onBootstrap(); await sut.onBootstrap();
expect(mocks.job.pause).toHaveBeenCalledTimes(1);
expect(mocks.map.init).toHaveBeenCalledTimes(1); expect(mocks.map.init).toHaveBeenCalledTimes(1);
expect(mocks.job.resume).toHaveBeenCalledTimes(1);
}); });
}); });


@ -121,9 +121,7 @@ export class MetadataService extends BaseService {
this.logger.log('Initializing metadata service'); this.logger.log('Initializing metadata service');
try { try {
await this.jobRepository.pause(QueueName.METADATA_EXTRACTION);
await this.databaseRepository.withLock(DatabaseLock.GeodataImport, () => this.mapRepository.init()); await this.databaseRepository.withLock(DatabaseLock.GeodataImport, () => this.mapRepository.init());
await this.jobRepository.resume(QueueName.METADATA_EXTRACTION);
this.logger.log(`Initialized local reverse geocoder`); this.logger.log(`Initialized local reverse geocoder`);
} catch (error: Error | any) { } catch (error: Error | any) {


@ -499,14 +499,13 @@ describe(NotificationService.name, () => {
}); });
it('should add new recipients for new images if job is already queued', async () => { it('should add new recipients for new images if job is already queued', async () => {
mocks.job.removeJob.mockResolvedValue({ id: '1', recipientIds: ['2', '3', '4'] } as INotifyAlbumUpdateJob);
await sut.onAlbumUpdate({ id: '1', recipientIds: ['1', '2', '3'] } as INotifyAlbumUpdateJob); await sut.onAlbumUpdate({ id: '1', recipientIds: ['1', '2', '3'] } as INotifyAlbumUpdateJob);
expect(mocks.job.queue).toHaveBeenCalledWith({ expect(mocks.job.queue).toHaveBeenCalledWith({
name: JobName.NOTIFY_ALBUM_UPDATE, name: JobName.NOTIFY_ALBUM_UPDATE,
data: { data: {
id: '1', id: '1',
delay: 300_000, delay: 300_000,
recipientIds: ['1', '2', '3', '4'], recipientIds: ['1', '2', '3'],
}, },
}); });
}); });


@ -196,14 +196,15 @@ export class NotificationService extends BaseService {
data: { id, recipientIds, delay: NotificationService.albumUpdateEmailDelayMs }, data: { id, recipientIds, delay: NotificationService.albumUpdateEmailDelayMs },
}; };
const previousJobData = await this.jobRepository.removeJob(id, JobName.NOTIFY_ALBUM_UPDATE); // todo: https://github.com/immich-app/immich/pull/17879
if (previousJobData && this.isAlbumUpdateJob(previousJobData)) { // const previousJobData = await this.jobRepository.removeJob(id, JobName.NOTIFY_ALBUM_UPDATE);
for (const id of previousJobData.recipientIds) { // if (previousJobData && this.isAlbumUpdateJob(previousJobData)) {
if (!recipientIds.includes(id)) { // for (const id of previousJobData.recipientIds) {
recipientIds.push(id); // if (!recipientIds.includes(id)) {
} // recipientIds.push(id);
} // }
} // }
// }
await this.jobRepository.queue(job); await this.jobRepository.queue(job);
} }
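
The commented-out merge above is a real behaviour change from the queue swap: graphile-worker's jobKey replaces the pending job wholesale on re-queue, with no remove-then-merge step, so the earlier recipient list is dropped. That is also why the spec above now expects ['1', '2', '3']. A hedged sketch of the semantics, given a WorkerUtils instance and an illustrative queue name:

await workerUtils.addJob('notifications', { id: '1', recipientIds: ['2', '3', '4'] }, { jobKey: '1' });
// Re-queueing with the same jobKey overwrites the pending payload entirely:
await workerUtils.addJob('notifications', { id: '1', recipientIds: ['1', '2', '3'] }, { jobKey: '1' });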


@ -529,10 +529,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({ mocks.job.getJobCounts.mockResolvedValue({
active: 1, active: 1,
waiting: 0, waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0, delayed: 0,
failed: 0,
}); });
mocks.systemMetadata.get.mockResolvedValue(systemConfigStub.machineLearningDisabled); mocks.systemMetadata.get.mockResolvedValue(systemConfigStub.machineLearningDisabled);
@ -546,10 +544,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({ mocks.job.getJobCounts.mockResolvedValue({
active: 1, active: 1,
waiting: 1, waiting: 1,
paused: 0,
completed: 0,
failed: 0,
delayed: 0, delayed: 0,
failed: 0,
}); });
await expect(sut.handleQueueRecognizeFaces({})).resolves.toBe(JobStatus.SKIPPED); await expect(sut.handleQueueRecognizeFaces({})).resolves.toBe(JobStatus.SKIPPED);
@ -561,10 +557,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({ mocks.job.getJobCounts.mockResolvedValue({
active: 1, active: 1,
waiting: 0, waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0, delayed: 0,
failed: 0,
}); });
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1])); mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));
mocks.person.getAllWithoutFaces.mockResolvedValue([]); mocks.person.getAllWithoutFaces.mockResolvedValue([]);
@ -590,10 +584,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({ mocks.job.getJobCounts.mockResolvedValue({
active: 1, active: 1,
waiting: 0, waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0, delayed: 0,
failed: 0,
}); });
mocks.person.getAll.mockReturnValue(makeStream()); mocks.person.getAll.mockReturnValue(makeStream());
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1])); mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));
@ -619,10 +611,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({ mocks.job.getJobCounts.mockResolvedValue({
active: 1, active: 1,
waiting: 0, waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0, delayed: 0,
failed: 0,
}); });
mocks.person.getAll.mockReturnValue(makeStream()); mocks.person.getAll.mockReturnValue(makeStream());
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1])); mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));
@ -666,10 +656,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({ mocks.job.getJobCounts.mockResolvedValue({
active: 1, active: 1,
waiting: 0, waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0, delayed: 0,
failed: 0,
}); });
mocks.person.getAll.mockReturnValue(makeStream([faceStub.face1.person, personStub.randomPerson])); mocks.person.getAll.mockReturnValue(makeStream([faceStub.face1.person, personStub.randomPerson]));
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1])); mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));


@ -392,7 +392,8 @@ export class PersonService extends BaseService {
return JobStatus.SKIPPED; return JobStatus.SKIPPED;
} }
await this.jobRepository.waitForQueueCompletion(QueueName.THUMBNAIL_GENERATION, QueueName.FACE_DETECTION); // todo
// await this.jobRepository.waitForQueueCompletion(QueueName.THUMBNAIL_GENERATION, QueueName.FACE_DETECTION);
if (nightly) { if (nightly) {
const [state, latestFaceDate] = await Promise.all([ const [state, latestFaceDate] = await Promise.all([
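
The removed waitForQueueCompletion has no direct replacement yet (hence the todo above). One possible substitute, sketched on the assumption that the new getJobCounts counters are sufficient for detecting an idle queue:

import { setTimeout } from 'node:timers/promises';

// Poll until none of the given queues has active or waiting jobs.
const waitForIdle = async (jobRepository: JobRepository, ...queues: QueueName[]) => {
  for (;;) {
    const counts = await Promise.all(queues.map((name) => jobRepository.getJobCounts(name)));
    if (counts.every(({ active, waiting }) => active === 0 && waiting === 0)) {
      return;
    }
    await setTimeout(1000); // poll once per second
  }
};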


@ -256,16 +256,13 @@ export interface INotifyAlbumUpdateJob extends IEntityJob, IDelayedJob {
export interface JobCounts { export interface JobCounts {
active: number; active: number;
completed: number;
failed: number;
delayed: number;
waiting: number; waiting: number;
paused: number; delayed: number;
failed: number;
} }
export interface QueueStatus { export interface QueueStatus {
isActive: boolean; paused: boolean;
isPaused: boolean;
} }
export type JobItem = export type JobItem =
@ -450,6 +447,14 @@ export type MemoriesState = {
lastOnThisDayDate: string; lastOnThisDayDate: string;
}; };
export type QueueState = {
paused: boolean;
};
export type QueuesState = {
[key in QueueName]?: QueueState;
};
export interface SystemMetadata extends Record<SystemMetadataKey, Record<string, any>> { export interface SystemMetadata extends Record<SystemMetadataKey, Record<string, any>> {
[SystemMetadataKey.ADMIN_ONBOARDING]: { isOnboarded: boolean }; [SystemMetadataKey.ADMIN_ONBOARDING]: { isOnboarded: boolean };
[SystemMetadataKey.FACIAL_RECOGNITION_STATE]: { lastRun?: string }; [SystemMetadataKey.FACIAL_RECOGNITION_STATE]: { lastRun?: string };
@@ -459,6 +464,7 @@ export interface SystemMetadata extends Record<SystemMetadataKey, Record<string,
   [SystemMetadataKey.SYSTEM_FLAGS]: DeepPartial<SystemFlags>;
   [SystemMetadataKey.VERSION_CHECK_STATE]: VersionCheckMetadata;
   [SystemMetadataKey.MEMORIES_STATE]: MemoriesState;
+  [SystemMetadataKey.QUEUES_STATE]: QueuesState;
 }

 export type UserMetadataItem<T extends keyof UserMetadata = UserMetadataKey> = {
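
Note: per-queue pause state now persists in system metadata instead of living inside bullmq, so it survives restarts. A hedged sketch of reading it back; QueuesState and SystemMetadataKey.QUEUES_STATE come from this diff, while the repository accessor and its signature are assumed:

  // Assumed accessor; the returned value follows the QueuesState shape above.
  const queuesState = (await systemMetadataRepository.get(SystemMetadataKey.QUEUES_STATE)) ?? {};
  const paused = queuesState[QueueName.THUMBNAIL_GENERATION]?.paused ?? false;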

View File

@@ -32,7 +32,7 @@ export const asPostgresConnectionConfig = (params: DatabaseConnectionParams) =>
     return {
       host: params.host,
       port: params.port,
-      username: params.username,
+      user: params.username,
       password: params.password,
       database: params.database,
       ssl: undefined,
@@ -51,7 +51,7 @@ export const asPostgresConnectionConfig = (params: DatabaseConnectionParams) =>
   return {
     host: host ?? undefined,
     port: port ? Number(port) : undefined,
-    username: user,
+    user,
     password,
     database: database ?? undefined,
     ssl,
@@ -92,7 +92,7 @@ export const getKyselyConfig = (
     },
     host: config.host,
     port: config.port,
-    username: config.username,
+    user: config.user,
     password: config.password,
     database: config.database,
     ssl: config.ssl,
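
Note: renaming username to user appears to align the connection object with the option name the node-postgres driver expects, so the config can be passed through to pg (and on to Kysely's PostgresDialect) without remapping. For illustration only; the values are placeholders:

  import { Pool } from 'pg';

  // node-postgres accepts `user`, not `username`.
  const pool = new Pool({
    host: 'localhost',
    port: 5432,
    user: 'postgres',
    password: 'postgres',
    database: 'immich',
  });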

View File

@@ -18,7 +18,6 @@ read_file_and_export "DB_HOSTNAME_FILE" "DB_HOSTNAME"
 read_file_and_export "DB_DATABASE_NAME_FILE" "DB_DATABASE_NAME"
 read_file_and_export "DB_USERNAME_FILE" "DB_USERNAME"
 read_file_and_export "DB_PASSWORD_FILE" "DB_PASSWORD"
-read_file_and_export "REDIS_PASSWORD_FILE" "REDIS_PASSWORD"

 export CPU_CORES="${CPU_CORES:=$(./get-cpus.sh)}"
 echo "Detected CPU Cores: $CPU_CORES"

View File

@@ -8,12 +8,6 @@ const envData: EnvData = {
   environment: ImmichEnvironment.PRODUCTION,
   buildMetadata: {},

-  bull: {
-    config: {
-      prefix: 'immich_bull',
-    },
-    queues: [{ name: 'queue-1' }],
-  },
-
   cls: {
     config: {},
@@ -52,12 +46,6 @@ const envData: EnvData = {
     },
   },

-  redis: {
-    host: 'redis',
-    port: 6379,
-    db: 0,
-  },
-
   resourcePaths: {
     lockFile: 'build-lock.json',
     geodata: {

View File

@@ -5,18 +5,16 @@ import { Mocked, vitest } from 'vitest';
 export const newJobRepositoryMock = (): Mocked<RepositoryInterface<JobRepository>> => {
   return {
     setup: vitest.fn(),
-    startWorkers: vitest.fn(),
-    run: vitest.fn(),
-    setConcurrency: vitest.fn(),
-    empty: vitest.fn(),
+    start: vitest.fn(),
+    stop: vitest.fn(),
     pause: vitest.fn(),
     resume: vitest.fn(),
+    run: vitest.fn(),
     queue: vitest.fn().mockImplementation(() => Promise.resolve()),
     queueAll: vitest.fn().mockImplementation(() => Promise.resolve()),
-    getQueueStatus: vitest.fn(),
-    getJobCounts: vitest.fn(),
     clear: vitest.fn(),
-    waitForQueueCompletion: vitest.fn(),
-    removeJob: vitest.fn(),
+    clearFailed: vitest.fn(),
+    getJobCounts: vitest.fn(),
+    getQueueStatus: vitest.fn(),
   };
 };
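
Note: the mock now mirrors the postgres-backed repository surface: start/stop replace startWorkers, and clearFailed replaces waitForQueueCompletion/removeJob. A short vitest usage sketch, assuming both getters resolve asynchronously and using only shapes visible in this diff:

  const jobMock = newJobRepositoryMock();
  jobMock.getJobCounts.mockResolvedValue({ active: 0, waiting: 0, delayed: 0, failed: 0 });
  jobMock.getQueueStatus.mockResolvedValue({ paused: false });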

View File

@@ -47,20 +47,20 @@
     onCommand,
   }: Props = $props();

-  let waitingCount = $derived(jobCounts.waiting + jobCounts.paused + jobCounts.delayed);
-  let isIdle = $derived(!queueStatus.isActive && !queueStatus.isPaused);
+  let waitingCount = $derived(jobCounts.waiting + jobCounts.delayed);
+  let idle = $derived(jobCounts.active + jobCounts.waiting + jobCounts.delayed === 0);
   let multipleButtons = $derived(allText || refreshText);

-  const commonClasses = 'flex place-items-center justify-between w-full py-2 sm:py-4 pe-4 ps-6';
+  const commonClasses = 'flex place-items-center justify-between w-full py-2 sm:py-4 pr-4 pl-6';
 </script>

 <div
   class="flex flex-col overflow-hidden rounded-2xl bg-gray-100 dark:bg-immich-dark-gray sm:flex-row sm:rounded-[35px]"
 >
   <div class="flex w-full flex-col">
-    {#if queueStatus.isPaused}
+    {#if queueStatus.paused}
       <JobTileStatus color="warning">{$t('paused')}</JobTileStatus>
-    {:else if queueStatus.isActive}
+    {:else if !idle}
       <JobTileStatus color="success">{$t('active')}</JobTileStatus>
     {/if}
     <div class="flex flex-col gap-2 p-5 sm:p-7 md:p-9">
@@ -119,12 +119,12 @@
       </div>
       <div
-        class="{commonClasses} flex-row-reverse rounded-b-lg bg-gray-200 text-immich-dark-bg dark:bg-gray-700 dark:text-immich-gray sm:rounded-s-none sm:rounded-e-lg"
+        class="{commonClasses} rounded-b-lg bg-gray-200 text-immich-dark-bg dark:bg-gray-700 dark:text-immich-gray sm:rounded-s-none sm:rounded-e-lg"
       >
-        <p>{$t('waiting')}</p>
         <p class="text-2xl">
           {waitingCount.toLocaleString($locale)}
         </p>
+        <p>{$t('waiting')}</p>
       </div>
     </div>
   </div>
@@ -139,31 +139,15 @@
       <Icon path={mdiAlertCircle} size="36" />
       {$t('disabled').toUpperCase()}
     </JobTileButton>
-  {/if}
-
-  {#if !disabled && !isIdle}
-    {#if waitingCount > 0}
-      <JobTileButton color="gray" onClick={() => onCommand({ command: JobCommand.Empty, force: false })}>
+  {:else}
+    {#if !idle}
+      <JobTileButton color="gray" onClick={() => onCommand({ command: JobCommand.Clear, force: false })}>
         <Icon path={mdiClose} size="24" />
         {$t('clear').toUpperCase()}
       </JobTileButton>
     {/if}
-    {#if queueStatus.isPaused}
-      {@const size = waitingCount > 0 ? '24' : '48'}
-      <JobTileButton color="light-gray" onClick={() => onCommand({ command: JobCommand.Resume, force: false })}>
-        <!-- size property is not reactive, so have to use width and height -->
-        <Icon path={mdiFastForward} {size} />
-        {$t('resume').toUpperCase()}
-      </JobTileButton>
-    {:else}
-      <JobTileButton color="light-gray" onClick={() => onCommand({ command: JobCommand.Pause, force: false })}>
-        <Icon path={mdiPause} size="24" />
-        {$t('pause').toUpperCase()}
-      </JobTileButton>
-    {/if}
-  {/if}
-
-  {#if !disabled && multipleButtons && isIdle}
+    {#if multipleButtons && idle}
       {#if allText}
         <JobTileButton color="dark-gray" onClick={() => onCommand({ command: JobCommand.Start, force: true })}>
           <Icon path={mdiAllInclusive} size="24" />
@@ -182,11 +166,25 @@
         </JobTileButton>
       {/if}
-    {#if !disabled && !multipleButtons && isIdle}
+    {#if !multipleButtons && idle}
       <JobTileButton color="light-gray" onClick={() => onCommand({ command: JobCommand.Start, force: false })}>
-        <Icon path={mdiPlay} size="48" />
+        <Icon path={mdiPlay} size="24" />
         {missingText}
       </JobTileButton>
     {/if}
+    {#if queueStatus.paused}
+      <JobTileButton color="gray" onClick={() => onCommand({ command: JobCommand.Resume, force: false })}>
+        <!-- size property is not reactive, so have to use width and height -->
+        <Icon path={mdiFastForward} size="24" />
+        {$t('resume').toUpperCase()}
+      </JobTileButton>
+    {:else}
+      <JobTileButton color="gray" onClick={() => onCommand({ command: JobCommand.Pause, force: false })}>
+        <Icon path={mdiPause} size="24" />
+        {$t('pause').toUpperCase()}
+      </JobTileButton>
+    {/if}
+  {/if}
   </div>
 </div>

View File

@@ -154,7 +154,7 @@
     jobs[jobId] = await sendJobCommand({ id: jobId, jobCommandDto: jobCommand });

     switch (jobCommand.command) {
-      case JobCommand.Empty: {
+      case JobCommand.Clear: {
         notificationController.show({
           message: $t('admin.cleared_jobs', { values: { job: title } }),
           type: NotificationType.Info,
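
Note: JobCommand.Empty is renamed to JobCommand.Clear end to end. A hedged sketch of issuing the renamed command from the web client; sendJobCommand and the dto shape appear in this diff, while the concrete jobId value is a placeholder:

  // jobId identifies the queue behind the tile; the dto mirrors the call above.
  jobs[jobId] = await sendJobCommand({
    id: jobId,
    jobCommandDto: { command: JobCommand.Clear, force: false },
  });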