mirror of
https://github.com/immich-app/immich.git
synced 2026-04-11 03:32:01 -04:00
* feat(web): Synchronize information from deduplicated images * Added new settings menu to the deduplication tab. * The togglable options in the settings are synchronization of: albums, favorites, ratings, description, visibility and location. * When synchronizing the albums, the resolved images will be added to all albums of the duplicates. * When synchronizing the favorite status, the resolved images will be marked as favorite, if at least one selectable image is marked as favorite. * When synchronizing the ratings, the highest rating from the selectable images will be applied to the resolved image. * When synchronizing the description, all descriptions from the selectable images will be merged into one description for the resolved image. * When synchronizing the visibility, the most restrictive visibility setting from the selectable images will be applied to the resolved image. * When synchronizing the location, if exactly one unique location exists among the selectable images, this location will be applied to the resolved image. * There is no additional UI for these settings to keep the visual clutter minimal. The settings are applied automatically based on the user's preferences. * Replace addAssetToAlbums with copyAsset * fix linter * feat(web): add duplicate sync fields and fix typo * feat(web): add tag sync and enhance duplicate resolution This update introduces tag synchronization for duplicate resolution, ensuring all unique tag IDs from duplicates are applied to kept assets. The visibility sync logic is updated to use a simplified ordering, as the hidden status items will never show up in a duplicate set. Album synchronization now merges albums directly via addAssetsToAlbums, as the approach with the copyAsset API endpoint was inefficient. Description, rating, and location sync logic is improved for correctness and deduplication. i18n strings were added / updated.
* feat(server): move duplicate resolution to backend with sync and stacking Moves duplicate metadata synchronization from frontend to backend, enabling robust batch operations and proper validation. This is an improved refactor of PR #13851. New endpoints: - POST /duplicates/resolve - batch resolve with configurable metadata sync - POST /duplicates/stack - create stacks from duplicate groups - GET /duplicates - now includes suggestedKeepAssetIds based on file size and EXIF Key changes: - Move sync logic (albums, tags, favorites, ratings, descriptions, location, visibility) to server - Add server-side metadata merge policies with proper conflict resolution - Replace client-side resolution logic with new backend endpoints - Add comprehensive E2E tests (70+ test cases) and unit tests - Update OpenAPI specs and TypeScript SDK No breaking changes - only additions to existing API. * feat(preferences): enable all duplicate sync settings by default * chore: clean up * chore: clean up * refactor: rename & clean up * fix: preference upgrade * chore: linting * refactor(e2e): use updateAssets API for setAssetDuplicateId * fix: visibility sync logic in duplicate resolution * fix(duplicate): write description to exifUpdate Previously the duplicate resolution populated assetUpdate.description even though description belongs to exif info. * fix(duplicate): remove redundant updateLockedColumns wrapper updateAllExif already computes lockedProperties via distinctLocked using Object.keys(options). The wrapper added a lockedProperties key to the options object, causing the spurious string 'lockedProperties' to be stored in the lockedProperties array. * fix(duplicate): write merged tags to asset_exif to survive metadata re-extraction During duplicate resolution, replaceAssetTags correctly wrote merged tag IDs to the tag_asset table, but never updated asset_exif.tags or locked the tags property. 
The subsequent SidecarWrite → AssetExtractMetadata chain calls applyTagList, which destructively replaces tag_asset rows with whatever is in asset_exif.tags — still the original per-asset tags, not the merged set. Write merged tag values to asset_exif.tags via updateAllExif (which also locks the property via distinctLocked), and queue SidecarWrite when tags change so they persist to the sidecar file. * docs(duplicates): clarify location and tag sync behavior * refactor(duplicate): remove sync settings, always sync all metadata on resolve Remove DuplicateSyncSettingsDto and the per-field sync toggles (albums, favorites, rating, description, visibility, location, tags). Duplicate resolution now unconditionally syncs all metadata from trashed assets to kept assets. - Remove DuplicateSyncSettingsDto and settings field from DuplicateResolveDto - Update DuplicateService to always run all sync logic without conditionals - Delete DuplicateSettingsModal.svelte and settings gear button from UI - Remove DuplicateSettings type and duplicateSettings persisted store - Update unit and e2e tests to remove settings from resolve requests * docs: update duplicates utility to reflect automatic metadata sync * docs(web): replace duplicates info modal with link to documentation * chore: clean up * fix: add missing type cast to jsonAgg in duplicate repository getAll * fix: skip persisting rating=0 in duplicate merge to avoid unnecessary sidecar write --------- Co-authored-by: Toni <51962051+EinToni@users.noreply.github.com> Co-authored-by: Jason Rasmussen <jason@rasm.me> Co-authored-by: Jason Rasmussen <jrasm91@gmail.com>
712 lines
22 KiB
TypeScript
712 lines
22 KiB
TypeScript
import {
|
|
AssetMediaCreateDto,
|
|
AssetMediaResponseDto,
|
|
AssetResponseDto,
|
|
AssetVisibility,
|
|
CheckExistingAssetsDto,
|
|
CreateAlbumDto,
|
|
CreateLibraryDto,
|
|
JobCreateDto,
|
|
MaintenanceAction,
|
|
ManualJobName,
|
|
MetadataSearchDto,
|
|
Permission,
|
|
PersonCreateDto,
|
|
QueueCommandDto,
|
|
QueueName,
|
|
QueuesResponseLegacyDto,
|
|
SharedLinkCreateDto,
|
|
UpdateLibraryDto,
|
|
UserAdminCreateDto,
|
|
UserPreferencesUpdateDto,
|
|
ValidateLibraryDto,
|
|
checkExistingAssets,
|
|
createAlbum,
|
|
createApiKey,
|
|
createJob,
|
|
createLibrary,
|
|
createPartner,
|
|
createPerson,
|
|
createSharedLink,
|
|
createStack,
|
|
createUserAdmin,
|
|
deleteAssets,
|
|
deleteDatabaseBackup,
|
|
getAssetInfo,
|
|
getConfig,
|
|
getConfigDefaults,
|
|
getQueuesLegacy,
|
|
listDatabaseBackups,
|
|
login,
|
|
runQueueCommandLegacy,
|
|
scanLibrary,
|
|
searchAssets,
|
|
setBaseUrl,
|
|
setMaintenanceMode,
|
|
signUpAdmin,
|
|
tagAssets,
|
|
updateAdminOnboarding,
|
|
updateAlbumUser,
|
|
updateAssets,
|
|
updateConfig,
|
|
updateLibrary,
|
|
updateMyPreferences,
|
|
upsertTags,
|
|
validate,
|
|
} from '@immich/sdk';
|
|
import { BrowserContext } from '@playwright/test';
|
|
import { exec, spawn } from 'node:child_process';
|
|
import { createHash } from 'node:crypto';
|
|
import { createWriteStream, existsSync, mkdirSync, renameSync, rmSync, writeFileSync } from 'node:fs';
|
|
import { mkdtemp } from 'node:fs/promises';
|
|
import { tmpdir } from 'node:os';
|
|
import { dirname, join, resolve } from 'node:path';
|
|
import { Readable } from 'node:stream';
|
|
import { pipeline } from 'node:stream/promises';
|
|
import { setTimeout as setAsyncTimeout } from 'node:timers/promises';
|
|
import { promisify } from 'node:util';
|
|
import { createGzip } from 'node:zlib';
|
|
import pg from 'pg';
|
|
import { io, type Socket } from 'socket.io-client';
|
|
import { loginDto, signupDto } from 'src/fixtures';
|
|
import { makeRandomImage } from 'src/generators';
|
|
import request from 'supertest';
|
|
import { playwrightDbHost, playwrightHost, playwriteBaseUrl } from '../playwright.config';
|
|
|
|
export type { Emitter } from '@socket.io/component-emitter';
|
|
|
|
type CommandResponse = { stdout: string; stderr: string; exitCode: number | null };
|
|
type EventType = 'assetUpload' | 'assetUpdate' | 'assetDelete' | 'userDelete' | 'assetHidden';
|
|
type WaitOptions = { event: EventType; id?: string; total?: number; timeout?: number };
|
|
type AdminSetupOptions = { onboarding?: boolean };
|
|
type FileData = { bytes?: Buffer; filename: string };
|
|
|
|
const dbUrl = `postgres://postgres:postgres@${playwrightDbHost}:5435/immich`;
|
|
export const baseUrl = playwriteBaseUrl;
|
|
export const shareUrl = `${baseUrl}/share`;
|
|
export const app = `${baseUrl}/api`;
|
|
// TODO move test assets into e2e/assets
|
|
export const testAssetDir = resolve(import.meta.dirname, '../test-assets');
|
|
export const testAssetDirInternal = '/test-assets';
|
|
export const tempDir = tmpdir();
|
|
export const asBearerAuth = (accessToken: string) => ({ Authorization: `Bearer ${accessToken}` });
|
|
export const asKeyAuth = (key: string) => ({ 'x-api-key': key });
|
|
export const immichCli = (args: string[]) =>
|
|
executeCommand('pnpm', ['exec', 'immich', '-d', `/${tempDir}/immich/`, ...args], { cwd: '../cli' }).promise;
|
|
export const dockerExec = (args: string[]) =>
|
|
executeCommand('docker', ['exec', '-i', 'immich-e2e-server', '/bin/bash', '-c', args.join(' ')]);
|
|
export const immichAdmin = (args: string[]) => dockerExec([`immich-admin ${args.join(' ')}`]);
|
|
export const specialCharStrings = ["'", '"', ',', '{', '}', '*'];
|
|
export const TEN_TIMES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
|
|
|
|
const executeCommand = (command: string, args: string[], options?: { cwd?: string }) => {
|
|
let _resolve: (value: CommandResponse) => void;
|
|
const promise = new Promise<CommandResponse>((resolve) => (_resolve = resolve));
|
|
const child = spawn(command, args, { stdio: 'pipe', cwd: options?.cwd });
|
|
|
|
let stdout = '';
|
|
let stderr = '';
|
|
|
|
child.stdout.on('data', (data) => (stdout += data.toString()));
|
|
child.stderr.on('data', (data) => (stderr += data.toString()));
|
|
child.on('exit', (exitCode) => {
|
|
_resolve({
|
|
stdout: stdout.trim(),
|
|
stderr: stderr.trim(),
|
|
exitCode,
|
|
});
|
|
});
|
|
|
|
return { promise, child };
|
|
};
|
|
|
|
// Shared postgres client, created lazily by utils.connectDatabase and
// dropped (set back to null) when the connection ends or errors.
let client: pg.Client | null = null;

// Ids observed per websocket event type; consumed by waitForWebsocketEvent.
const events: Record<EventType, Set<string>> = {
  assetHidden: new Set<string>(),
  assetUpload: new Set<string>(),
  assetUpdate: new Set<string>(),
  assetDelete: new Set<string>(),
  userDelete: new Set<string>(),
};

// Waiters keyed by a specific asset/user id — fired once, then removed.
const idCallbacks: Record<string, () => void> = {};

// Waiters keyed by event type — fired once that event's id-set reaches `count`.
const countCallbacks: Record<string, { count: number; callback: () => void }> = {};

// Promise-returning wrapper around child_process.exec (runs through a shell).
const execPromise = promisify(exec);
|
|
|
|
const onEvent = ({ event, id }: { event: EventType; id: string }) => {
|
|
// console.log(`Received event: ${event} [id=${id}]`);
|
|
const set = events[event];
|
|
|
|
set.add(id);
|
|
|
|
const idCallback = idCallbacks[id];
|
|
if (idCallback) {
|
|
idCallback();
|
|
delete idCallbacks[id];
|
|
}
|
|
|
|
const item = countCallbacks[event];
|
|
if (item) {
|
|
const { count, callback: countCallback } = item;
|
|
|
|
if (set.size >= count) {
|
|
countCallback();
|
|
delete countCallbacks[event];
|
|
}
|
|
}
|
|
};
|
|
|
|
/**
 * Shared helpers for the e2e test-suite: database access, websocket event
 * tracking, SDK call wrappers, and filesystem fixtures.
 */
export const utils = {
  // Lazily opens a single shared postgres connection; a fresh client is
  // created on the next call after the connection ends or errors.
  connectDatabase: async () => {
    if (!client) {
      client = new pg.Client(dbUrl);
      // Drop the cached client so the next call reconnects.
      client.on('end', () => (client = null));
      client.on('error', () => (client = null));
      await client.connect();
    }

    return client;
  },

  // Closes the shared connection if one is open (the 'end' handler above
  // clears the cached client).
  disconnectDatabase: async () => {
    if (client) {
      await client.end();
    }
  },
|
|
|
|
/**
 * Clears test data between specs.
 *
 * Truncates the given tables (default: the full set of mutable app tables).
 * 'system_metadata' is handled separately with a DELETE that preserves the
 * reverse-geocoding-state and system-flags rows. Retries up to 3 times on
 * a postgres deadlock (SQLSTATE 40P01) with linear backoff.
 */
resetDatabase: async (tables?: string[]) => {
  client = await utils.connectDatabase();

  tables = tables || [
    // TODO e2e test for deleting a stack, since it is quite complex
    'stack',
    'library',
    'shared_link',
    'person',
    'album',
    'asset',
    'asset_face',
    'activity',
    'api_key',
    'session',
    'user',
    'system_metadata',
    'tag',
  ];

  // system_metadata must keep a couple of seed rows, so it cannot be truncated.
  const truncateTables = tables.filter((table) => table !== 'system_metadata');
  const sql: string[] = [];

  if (truncateTables.length > 0) {
    sql.push(`TRUNCATE "${truncateTables.join('", "')}" CASCADE;`);
  }

  if (tables.includes('system_metadata')) {
    sql.push(`DELETE FROM "system_metadata" where "key" NOT IN ('reverse-geocoding-state', 'system-flags');`);
  }

  const query = sql.join('\n');
  const maxRetries = 3;

  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      await client.query(query);
      return;
    } catch (error: any) {
      // 40P01 = deadlock_detected; back off (250ms * attempt) and retry.
      if (error?.code === '40P01' && attempt < maxRetries) {
        await new Promise((resolve) => setTimeout(resolve, 250 * attempt));
        continue;
      }
      console.error('Failed to reset database', error);
      throw error;
    }
  }
},
|
|
|
|
// Extracts a zip archive into `output` using the system `unzip` binary.
// NOTE(review): paths are interpolated into a shell command line; assumes
// they contain no double quotes — confirm callers only pass controlled paths.
unzip: async (input: string, output: string) => {
  await execPromise(`unzip -o -d "${output}" "${input}"`);
},

// Base64-encoded SHA-1 digest of the given bytes.
sha1: (bytes: Buffer) => createHash('sha1').update(bytes).digest('base64'),
|
|
|
|
/**
 * Opens an authenticated socket.io connection and wires server events into
 * the module-level event sets consumed by waitForWebsocketEvent.
 * Resolves with the socket once the 'connect' event fires.
 */
connectWebsocket: async (accessToken: string) => {
  const websocket = io(baseUrl, {
    path: '/api/socket.io',
    transports: ['websocket'],
    extraHeaders: { Authorization: `Bearer ${accessToken}` },
    autoConnect: true,
    forceNew: true,
  });

  return new Promise<Socket>((resolve) => {
    websocket
      .on('connect', () => resolve(websocket))
      .on('on_upload_success', (data: AssetResponseDto) => onEvent({ event: 'assetUpload', id: data.id }))
      .on('on_asset_update', (data: AssetResponseDto) => onEvent({ event: 'assetUpdate', id: data.id }))
      .on('on_asset_hidden', (assetId: string) => onEvent({ event: 'assetHidden', id: assetId }))
      .on('on_asset_delete', (assetId: string) => onEvent({ event: 'assetDelete', id: assetId }))
      .on('on_user_delete', (userId: string) => onEvent({ event: 'userDelete', id: userId }))
      .connect();
  });
},

// Disconnects the socket (if connected) and clears all recorded events.
disconnectWebsocket: (ws: Socket) => {
  if (ws?.connected) {
    ws.disconnect();
  }

  // Delegate instead of duplicating resetEvents' set-clearing loop.
  utils.resetEvents();
},

// Clears every recorded websocket event set (and nothing else).
resetEvents: () => {
  for (const set of Object.values(events)) {
    set.clear();
  }
},
|
|
|
|
/**
 * Resolves once the given websocket event has been observed — either for a
 * specific id, or once `total` distinct ids have been seen for the event.
 * Rejects after `timeout` ms (default 10s), or immediately when neither
 * `id` nor `total` is provided.
 */
waitForWebsocketEvent: ({ event, id, total: count, timeout: ms }: WaitOptions): Promise<void> => {
  return new Promise<void>((resolve, reject) => {
    if (!id && !count) {
      reject(new Error('id or count must be provided for waitForWebsocketEvent'));
      // Bug fix: bail out here. The original fell through after rejecting,
      // arming a timer and registering callbacks for a promise that was
      // already settled.
      return;
    }

    const timeout = setTimeout(() => reject(new Error(`Timed out waiting for ${event} event`)), ms || 10_000);
    const type = id ? `id=${id}` : `count=${count}`;
    console.log(`Waiting for ${event} [${type}]`);

    const set = events[event];
    const onId = () => {
      clearTimeout(timeout);
      resolve();
    };

    // The event may already have arrived before this waiter was registered.
    if ((id && set.has(id)) || (count && set.size >= count)) {
      onId();
      return;
    }

    if (id) {
      idCallbacks[id] = onId;
    }

    if (count) {
      countCallbacks[event] = {
        count,
        callback: onId,
      };
    }
  });
},
|
|
|
|
// Points the @immich/sdk client at the e2e server's API base URL.
initSdk: () => {
  setBaseUrl(app);
},

/**
 * Signs up the admin account and logs in. By default also marks onboarding
 * as complete so the web UI does not redirect to the setup wizard.
 */
adminSetup: async (options?: AdminSetupOptions) => {
  options = options || { onboarding: true };

  await signUpAdmin({ signUpDto: signupDto.admin });
  const response = await login({ loginCredentialDto: loginDto.admin });
  if (options.onboarding) {
    await updateAdminOnboarding(
      { adminOnboardingUpdateDto: { isOnboarded: true } },
      { headers: asBearerAuth(response.accessToken) },
    );
  }
  return response;
},

// Creates a user via the admin API, then logs in as that user and returns
// the login response (access token etc).
userSetup: async (accessToken: string, dto: UserAdminCreateDto) => {
  await createUserAdmin({ userAdminCreateDto: dto }, { headers: asBearerAuth(accessToken) });
  return login({
    loginCredentialDto: { email: dto.email, password: dto.password },
  });
},

// Creates an API key named 'e2e' with the given permissions.
createApiKey: (accessToken: string, permissions: Permission[]) => {
  return createApiKey({ apiKeyCreateDto: { name: 'e2e', permissions } }, { headers: asBearerAuth(accessToken) });
},
|
|
|
|
// Creates an album.
createAlbum: (accessToken: string, dto: CreateAlbumDto) =>
  createAlbum({ createAlbumDto: dto }, { headers: asBearerAuth(accessToken) }),

// Updates an album user's role.
updateAlbumUser: (accessToken: string, args: Parameters<typeof updateAlbumUser>[0]) =>
  updateAlbumUser(args, { headers: asBearerAuth(accessToken) }),

/**
 * Uploads a new asset via multipart POST /assets, falling back to a random
 * PNG when no asset bytes are provided. Returns the server's response body.
 */
createAsset: async (
  accessToken: string,
  dto?: Partial<Omit<AssetMediaCreateDto, 'assetData' | 'sidecarData'>> & {
    assetData?: FileData;
    sidecarData?: FileData;
  },
) => {
  // File payloads are attached as multipart files below; keep them out of
  // the plain form fields so they are not serialized as "[object Object]".
  const { assetData: assetFile, sidecarData: sidecarFile, ...rest } = dto ?? {};

  const _dto = {
    deviceAssetId: 'test-1',
    deviceId: 'test',
    fileCreatedAt: new Date().toISOString(),
    fileModifiedAt: new Date().toISOString(),
    ...rest,
  };

  const assetData = assetFile?.bytes || makeRandomImage();
  const filename = assetFile?.filename || 'example.png';

  if (assetFile?.bytes) {
    // Fixed broken template literal so the log actually shows the filename.
    console.log(`Uploading ${filename}`);
  }

  const builder = request(app)
    .post(`/assets`)
    .attach('assetData', assetData, filename)
    .set('Authorization', `Bearer ${accessToken}`);

  if (sidecarFile?.bytes) {
    void builder.attach('sidecarData', sidecarFile.bytes, sidecarFile.filename);
  }

  for (const [key, value] of Object.entries(_dto)) {
    void builder.field(key, String(value));
  }

  const { body } = await builder;

  return body as AssetMediaResponseDto;
},
|
|
|
|
/**
 * Replaces an existing asset's original file via PUT /assets/:id/original,
 * falling back to a random PNG when no bytes are provided.
 */
replaceAsset: async (
  accessToken: string,
  assetId: string,
  dto?: Partial<Omit<AssetMediaCreateDto, 'assetData'>> & { assetData?: FileData },
) => {
  // The file payload is attached as a multipart file below; keep it out of
  // the plain form fields so it is not serialized as "[object Object]".
  const { assetData: assetFile, ...rest } = dto ?? {};

  const _dto = {
    deviceAssetId: 'test-1',
    deviceId: 'test',
    fileCreatedAt: new Date().toISOString(),
    fileModifiedAt: new Date().toISOString(),
    ...rest,
  };

  const assetData = assetFile?.bytes || makeRandomImage();
  const filename = assetFile?.filename || 'example.png';

  if (assetFile?.bytes) {
    // Fixed broken template literal so the log actually shows the filename.
    console.log(`Uploading ${filename}`);
  }

  const builder = request(app)
    .put(`/assets/${assetId}/original`)
    .attach('assetData', assetData, filename)
    .set('Authorization', `Bearer ${accessToken}`);

  for (const [key, value] of Object.entries(_dto)) {
    void builder.field(key, String(value));
  }

  const { body } = await builder;

  return body as AssetMediaResponseDto;
},
|
|
|
|
// Writes a random PNG at `path`, creating parent directories as needed.
createImageFile: (path: string) => {
  // recursive mkdir is a no-op when the directory already exists, so the
  // previous existsSync pre-check was redundant.
  mkdirSync(dirname(path), { recursive: true });
  writeFileSync(path, makeRandomImage());
},

// Creates a directory (and any missing parents); no-op if it already exists.
createDirectory: (path: string) => {
  mkdirSync(path, { recursive: true });
},

// Deletes a file; `force` makes a missing path a no-op, replacing the
// check-then-remove race in the original.
removeImageFile: (path: string) => {
  rmSync(path, { force: true });
},

// Renames a file if it exists (renameSync would throw on a missing source).
renameImageFile: (oldPath: string, newPath: string) => {
  if (!existsSync(oldPath)) {
    return;
  }

  renameSync(oldPath, newPath);
},

// Recursively deletes a directory; `force` makes a missing path a no-op.
removeDirectory: (path: string) => {
  rmSync(path, { recursive: true, force: true });
},
|
|
|
|
// Fetches the current system configuration.
getSystemConfig: (accessToken: string) => getConfig({ headers: asBearerAuth(accessToken) }),

// Fetches detailed info for a single asset.
getAssetInfo: (accessToken: string, id: string) => getAssetInfo({ id }, { headers: asBearerAuth(accessToken) }),

// Checks which of the given device assets already exist on the server.
checkExistingAssets: (accessToken: string, checkExistingAssetsDto: CheckExistingAssetsDto) =>
  checkExistingAssets({ checkExistingAssetsDto }, { headers: asBearerAuth(accessToken) }),

// Runs a metadata search over assets.
searchAssets: async (accessToken: string, dto: MetadataSearchDto) => {
  return searchAssets({ metadataSearchDto: dto }, { headers: asBearerAuth(accessToken) });
},

// Bulk-archives assets by setting their visibility to Archive.
archiveAssets: (accessToken: string, ids: string[]) =>
  updateAssets(
    { assetBulkUpdateDto: { ids, visibility: AssetVisibility.Archive } },
    { headers: asBearerAuth(accessToken) },
  ),

// Bulk-deletes the given assets.
deleteAssets: (accessToken: string, ids: string[]) =>
  deleteAssets({ assetBulkDeleteDto: { ids } }, { headers: asBearerAuth(accessToken) }),
|
|
|
|
// Creates a person via the API, then assigns a fake thumbnail path directly
// in the database.
createPerson: async (accessToken: string, dto?: PersonCreateDto) => {
  const person = await createPerson({ personCreateDto: dto || {} }, { headers: asBearerAuth(accessToken) });
  await utils.setPersonThumbnail(person.id);

  return person;
},

// Inserts an asset_face row directly into the database.
// Silently does nothing if no database connection has been opened.
createFace: async ({ assetId, personId }: { assetId: string; personId: string }) => {
  if (!client) {
    return;
  }

  await client.query('INSERT INTO asset_face ("assetId", "personId") VALUES ($1, $2)', [assetId, personId]);
},

// Sets a fake thumbnail path for a person directly in the database.
// Silently does nothing if no database connection has been opened.
setPersonThumbnail: async (personId: string) => {
  if (!client) {
    return;
  }

  await client.query(`UPDATE "person" set "thumbnailPath" = '/my/awesome/thumbnail.jpg' where "id" = $1`, [personId]);
},
|
|
|
|
// Creates a shared link.
createSharedLink: (accessToken: string, dto: SharedLinkCreateDto) =>
  createSharedLink({ sharedLinkCreateDto: dto }, { headers: asBearerAuth(accessToken) }),

// Creates an external library.
createLibrary: (accessToken: string, dto: CreateLibraryDto) =>
  createLibrary({ createLibraryDto: dto }, { headers: asBearerAuth(accessToken) }),

// Validates a library's import paths / exclusion patterns.
validateLibrary: (accessToken: string, id: string, dto: ValidateLibraryDto) =>
  validate({ id, validateLibraryDto: dto }, { headers: asBearerAuth(accessToken) }),

// Updates a library.
updateLibrary: (accessToken: string, id: string, dto: UpdateLibraryDto) =>
  updateLibrary({ id, updateLibraryDto: dto }, { headers: asBearerAuth(accessToken) }),

// Shares the current user's assets with the given partner user id.
createPartner: (accessToken: string, id: string) =>
  createPartner({ partnerCreateDto: { sharedWithId: id } }, { headers: asBearerAuth(accessToken) }),

// Updates the current user's preferences.
updateMyPreferences: (accessToken: string, userPreferencesUpdateDto: UserPreferencesUpdateDto) =>
  updateMyPreferences({ userPreferencesUpdateDto }, { headers: asBearerAuth(accessToken) }),

// Creates a stack from the given asset ids.
createStack: (accessToken: string, assetIds: string[]) =>
  createStack({ stackCreateDto: { assetIds } }, { headers: asBearerAuth(accessToken) }),

// Sets (or clears, with null) an asset's duplicateId via the bulk-update endpoint.
setAssetDuplicateId: (accessToken: string, assetId: string, duplicateId: string | null) =>
  updateAssets({ assetBulkUpdateDto: { ids: [assetId], duplicateId } }, { headers: asBearerAuth(accessToken) }),

// Upserts tags by name.
upsertTags: (accessToken: string, tags: string[]) =>
  upsertTags({ tagUpsertDto: { tags } }, { headers: asBearerAuth(accessToken) }),

// Tags the given assets with an existing tag.
tagAssets: (accessToken: string, tagId: string, assetIds: string[]) =>
  tagAssets({ id: tagId, bulkIdsDto: { ids: assetIds } }, { headers: asBearerAuth(accessToken) }),

// Kicks off a manual job.
createJob: async (accessToken: string, jobCreateDto: JobCreateDto) =>
  createJob({ jobCreateDto }, { headers: asBearerAuth(accessToken) }),

// Sends a command to a job queue (legacy queue API).
queueCommand: async (accessToken: string, name: QueueName, queueCommandDto: QueueCommandDto) =>
  runQueueCommandLegacy({ name, queueCommandDto }, { headers: asBearerAuth(accessToken) }),
|
|
|
|
/**
 * Seeds the Playwright browser context with the three auth cookies the web
 * app expects, so tests can skip the login flow. The expiry is a fixed
 * far-future epoch timestamp.
 */
setAuthCookies: async (context: BrowserContext, accessToken: string, domain = playwrightHost) =>
  await context.addCookies([
    {
      name: 'immich_access_token',
      value: accessToken,
      domain,
      path: '/',
      expires: 2_058_028_213,
      httpOnly: true,
      secure: false,
      sameSite: 'Lax',
    },
    {
      name: 'immich_auth_type',
      value: 'password',
      domain,
      path: '/',
      expires: 2_058_028_213,
      httpOnly: true,
      secure: false,
      sameSite: 'Lax',
    },
    {
      // Readable by client-side JS (httpOnly: false), unlike the token cookies.
      name: 'immich_is_authenticated',
      value: 'true',
      domain,
      path: '/',
      expires: 2_058_028_213,
      httpOnly: false,
      secure: false,
      sameSite: 'Lax',
    },
  ]),

// Seeds the maintenance-mode token cookie used by the maintenance UI.
setMaintenanceAuthCookie: async (context: BrowserContext, token: string, domain = '127.0.0.1') =>
  await context.addCookies([
    {
      name: 'immich_maintenance_token',
      value: token,
      domain,
      path: '/',
      expires: 2_058_028_213,
      httpOnly: true,
      secure: false,
      sameSite: 'Lax',
    },
  ]),
|
|
|
|
/**
 * Starts maintenance mode and returns the Set-Cookie headers from the
 * response, captured via a wrapped fetch, so tests can authenticate
 * against the maintenance UI afterwards.
 */
enterMaintenance: async (accessToken: string) => {
  let setCookie: string[] | undefined;

  await setMaintenanceMode(
    {
      setMaintenanceModeDto: {
        action: MaintenanceAction.Start,
      },
    },
    {
      headers: asBearerAuth(accessToken),
      // Intercept the raw response to grab its Set-Cookie headers before the
      // SDK consumes it.
      fetch: (...args: Parameters<typeof fetch>) =>
        fetch(...args).then((response) => {
          setCookie = response.headers.getSetCookie();
          return response;
        }),
    },
  );

  return setCookie;
},

// Recreates an empty <testAssetDir>/temp folder.
resetTempFolder: () => {
  rmSync(`${testAssetDir}/temp`, { recursive: true, force: true });
  mkdirSync(`${testAssetDir}/temp`, { recursive: true });
},

// Moves a file inside the e2e server container.
async move(source: string, dest: string) {
  return executeCommand('docker', ['exec', 'immich-e2e-server', 'mv', source, dest]).promise;
},
|
|
|
|
/**
 * Triggers a database backup job, then polls the admin endpoint until
 * exactly one backup exists; resolves with that backup's filename.
 */
createBackup: async (accessToken: string) => {
  await utils.createJob(accessToken, {
    name: ManualJobName.BackupDatabase,
  });

  return utils.poll(
    () => request(app).get('/admin/database-backups').set('Authorization', `Bearer ${accessToken}`),
    ({ status, body }) => status === 200 && body.backups.length === 1,
    ({ body }) => body.backups[0].filename,
  );
},

// Deletes every existing database backup.
resetBackups: async (accessToken: string) => {
  const { backups } = await listDatabaseBackups({ headers: asBearerAuth(accessToken) });

  const backupFiles = backups.map((b) => b.filename);
  await deleteDatabaseBackup(
    { databaseBackupDeleteDto: { backups: backupFiles } },
    { headers: asBearerAuth(accessToken) },
  );
},

/**
 * Writes a gzipped SQL file — a valid 'SELECT 1;' for 'empty', garbage for
 * 'corrupted' — and copies it into the server container's backups folder.
 */
prepareTestBackup: async (generate: 'empty' | 'corrupted') => {
  const dir = await mkdtemp(join(tmpdir(), 'test-'));
  const fn = join(dir, 'file');

  const sql = Readable.from(generate === 'corrupted' ? 'IM CORRUPTED;' : 'SELECT 1;');
  const gzip = createGzip();
  const writeStream = createWriteStream(fn);
  await pipeline(sql, gzip, writeStream);

  await executeCommand('docker', ['cp', fn, `immich-e2e-server:/data/backups/development-${generate}.sql.gz`])
    .promise;
},

// Resets the system configuration back to the server defaults.
resetAdminConfig: async (accessToken: string) => {
  const defaultConfig = await getConfigDefaults({ headers: asBearerAuth(accessToken) });
  await updateConfig({ systemConfigDto: defaultConfig }, { headers: asBearerAuth(accessToken) });
},
|
|
|
|
// True when the given queue has no active or waiting jobs.
isQueueEmpty: async (accessToken: string, queue: keyof QueuesResponseLegacyDto) => {
  const queues = await getQueuesLegacy({ headers: asBearerAuth(accessToken) });
  const jobCounts = queues[queue].jobCounts;
  return !jobCounts.active && !jobCounts.waiting;
},

/**
 * Polls every 200ms until the queue is empty, failing after `ms` (default 10s).
 *
 * Rewritten without the async-promise-executor: the original armed a timer
 * that rejected the promise on timeout but never stopped the polling loop,
 * which kept issuing requests forever after the test had already failed.
 */
waitForQueueFinish: async (accessToken: string, queue: keyof QueuesResponseLegacyDto, ms?: number): Promise<void> => {
  const deadline = Date.now() + (ms || 10_000);

  while (true) {
    // Always check at least once, even with a very small timeout.
    if (await utils.isQueueEmpty(accessToken, queue)) {
      return;
    }

    if (Date.now() >= deadline) {
      throw new Error('Timed out waiting for queue to empty');
    }

    await setAsyncTimeout(200);
  }
},

// Logs the CLI in with a freshly created all-permissions API key; returns the secret.
cliLogin: async (accessToken: string) => {
  const key = await utils.createApiKey(accessToken, [Permission.All]);
  await immichCli(['login', app, `${key.secret}`]);
  return key.secret;
},

// Triggers a library scan and waits for the processing queues to drain.
scan: async (accessToken: string, id: string) => {
  await scanLibrary({ id }, { headers: asBearerAuth(accessToken) });

  await utils.waitForQueueFinish(accessToken, 'library');
  await utils.waitForQueueFinish(accessToken, 'sidecar');
  await utils.waitForQueueFinish(accessToken, 'metadataExtraction');
},
|
|
|
|
/**
 * Repeatedly invokes `cb` until `validate` accepts the result, then returns
 * `map(data)` (or the data itself). Waits 500ms between attempts and gives
 * up after 10 of them.
 *
 * Fixes two defects in the original: the give-up `throw` sat inside the same
 * `try` block, so its own empty `catch` swallowed it and the loop never
 * terminated; and errors thrown by `cb` did not count as attempts, so a
 * persistently failing callback also looped forever. A string was also
 * thrown instead of an Error.
 */
async poll<T>(cb: () => Promise<T>, validate: (value: T) => boolean, map?: (value: T) => any) {
  for (let attempt = 0; attempt < 10; attempt++) {
    try {
      const data = await cb();
      if (validate(data)) {
        return map ? map(data) : data;
      }
    } catch {
      // A transient error counts as a failed attempt.
    }

    await new Promise((resolve) => setTimeout(resolve, 500));
  }

  throw new Error('Could not clean up test.');
},
};
|
|
|
|
// Configure the SDK base URL once at module load so every helper can use it.
utils.initSdk();

// Fail fast if the shared test-assets checkout is missing.
if (!existsSync(`${testAssetDir}/albums`)) {
  throw new Error(
    `Test assets not found. Please checkout https://github.com/immich-app/test-assets into ${testAssetDir} before testing`,
  );
}
|