Compare commits

..

1 Commits

Author SHA1 Message Date
Santo Shakil 1dd68e5950 fix(mobile): dedupe stale remote_asset rows on sync
queue a pre-delete on the matching partial-index tuple before each
upsert in updateAssetsV1/V2 so the second insert does not crash on
SqliteException(2067) when the server re-issues a new id for the
same (ownerId, checksum). closes #22522 #27186.
2026-05-15 23:31:38 +06:00
5 changed files with 208 additions and 28 deletions
@@ -23,8 +23,6 @@ import java.io.IOException
import java.nio.ByteBuffer
import java.util.concurrent.ConcurrentHashMap
private const val MAX_PREALLOC_BYTES = 128 * 1024 * 1024
private class RemoteRequest(val cancellationSignal: CancellationSignal)
class RemoteImagesImpl(context: Context) : RemoteImageApi {
@@ -230,6 +228,7 @@ private class CronetImageFetcher : ImageFetcher {
private val onComplete: () -> Unit,
) : UrlRequest.Callback() {
private var buffer: NativeByteBuffer? = null
private var wrapped: ByteBuffer? = null
private var error: Exception? = null
override fun onRedirectReceived(request: UrlRequest, info: UrlResponseInfo, newUrl: String) {
@@ -243,16 +242,15 @@ private class CronetImageFetcher : ImageFetcher {
}
try {
// Content-Length is a size hint only. With Content-Encoding (gzip/br/...),
// Cronet auto-decompresses and writes decompressed bytes to our buffer, which
// may exceed the wire/compressed Content-Length. Always use the growable
// buffer path so we can't overflow.
val contentLength = info.allHeaders["content-length"]?.firstOrNull()?.toIntOrNull() ?: 0
// Cap the up-front alloc: Content-Length is untrusted and can be huge or near
// Int.MAX_VALUE (overflowing `+1`). For larger responses the grow path takes over.
val initialSize = if (contentLength in 1..MAX_PREALLOC_BYTES) contentLength + 1 else INITIAL_BUFFER_SIZE
buffer = NativeByteBuffer(initialSize)
request.read(buffer!!.wrapRemaining())
if (contentLength > 0) {
buffer = NativeByteBuffer(contentLength + 1)
wrapped = NativeBuffer.wrap(buffer!!.pointer, contentLength + 1)
request.read(wrapped)
} else {
buffer = NativeByteBuffer(INITIAL_BUFFER_SIZE)
request.read(buffer!!.wrapRemaining())
}
} catch (e: Exception) {
error = e
return request.cancel()
@@ -265,18 +263,14 @@ private class CronetImageFetcher : ImageFetcher {
byteBuffer: ByteBuffer
) {
try {
val b = buffer!!
b.advance(byteBuffer.position())
// Reuse the caller-supplied ByteBuffer as long as we don't need to grow.
// It already points at our native memory with position advanced past the
// written bytes — Cronet can keep writing into the remaining tail.
// Only when the buffer is full do we grow (which may realloc + move the
// native pointer) and need a fresh wrap.
val buf = if (b.offset == b.capacity) {
b.ensureHeadroom()
b.wrapRemaining()
val buf = if (wrapped == null) {
buffer!!.run {
advance(byteBuffer.position())
ensureHeadroom()
wrapRemaining()
}
} else {
byteBuffer
wrapped
}
request.read(buf)
} catch (e: Exception) {
@@ -286,6 +280,7 @@ private class CronetImageFetcher : ImageFetcher {
}
override fun onSucceeded(request: UrlRequest, info: UrlResponseInfo) {
wrapped?.let { buffer!!.advance(it.position()) }
onSuccess(buffer!!)
onComplete()
}
@@ -197,6 +197,16 @@ class SyncStreamRepository extends DriftDatabaseRepository {
try {
await _db.batch((batch) {
for (final asset in data) {
// Avoid SqliteException(2067) when server re-issues a new id for
// the same (ownerId, checksum). #22522 #27186
_enqueueRemoteAssetDedupe(
batch,
id: asset.id,
ownerId: asset.ownerId,
checksum: asset.checksum,
libraryId: asset.libraryId,
);
final companion = RemoteAssetEntityCompanion(
name: Value(asset.originalFileName),
type: Value(asset.type.toAssetType()),
@@ -236,6 +246,15 @@ class SyncStreamRepository extends DriftDatabaseRepository {
try {
await _db.batch((batch) {
for (final asset in data) {
// See updateAssetsV1 for why this dedupe is required. #22522 #27186
_enqueueRemoteAssetDedupe(
batch,
id: asset.id,
ownerId: asset.ownerId,
checksum: asset.checksum,
libraryId: asset.libraryId,
);
final companion = RemoteAssetEntityCompanion(
name: Value(asset.originalFileName),
type: Value(asset.type.toAssetType()),
@@ -271,6 +290,39 @@ class SyncStreamRepository extends DriftDatabaseRepository {
}
}
/// Queues a DELETE that removes any stale remote_asset row occupying the
/// partial UNIQUE index slot the incoming asset is about to claim:
/// - libraryId IS NULL  -> matched on (owner_id, checksum)
/// - libraryId NOT NULL -> matched on (owner_id, library_id, checksum)
/// Rows with the incoming id itself are excluded, so an update that reuses
/// the same id never deletes its own row.
void _enqueueRemoteAssetDedupe(
  Batch batch, {
  required String id,
  required String ownerId,
  required String checksum,
  required String? libraryId,
}) {
  batch.deleteWhere(
    _db.remoteAssetEntity,
    (row) {
      // The library predicate is the only part that differs between the
      // two partial-index shapes; pick it once, then AND in the shared terms.
      final libraryMatch = libraryId == null
          ? row.libraryId.isNull()
          : row.libraryId.equals(libraryId);
      return row.ownerId.equals(ownerId) &
          row.checksum.equals(checksum) &
          libraryMatch &
          row.id.equals(id).not();
    },
  );
}
Future<void> updateAssetsExifV1(Iterable<SyncAssetExifV1> data, {String debugLabel = 'user'}) async {
try {
await _db.batch((batch) {
@@ -28,6 +28,7 @@ SyncAssetV1 _createAsset({
String ownerId = 'user-1',
int? width,
int? height,
String? libraryId,
}) {
return SyncAssetV1(
id: id,
@@ -45,7 +46,38 @@ SyncAssetV1 _createAsset({
height: height,
deletedAt: null,
duration: null,
libraryId: null,
libraryId: libraryId,
livePhotoVideoId: null,
stackId: null,
thumbhash: null,
isEdited: false,
);
}
SyncAssetV2 _createAssetV2({
required String id,
required String checksum,
required String fileName,
String ownerId = 'user-1',
String? libraryId,
}) {
return SyncAssetV2(
id: id,
checksum: checksum,
originalFileName: fileName,
type: AssetTypeEnum.IMAGE,
ownerId: ownerId,
isFavorite: false,
fileCreatedAt: DateTime(2024, 1, 1),
fileModifiedAt: DateTime(2024, 1, 1),
createdAt: DateTime(2024, 1, 1),
localDateTime: DateTime(2024, 1, 1),
visibility: AssetVisibility.timeline,
width: null,
height: null,
deletedAt: null,
duration: 0,
libraryId: libraryId,
livePhotoVideoId: null,
stackId: null,
thumbhash: null,
@@ -240,4 +272,82 @@ void main() {
expect(after.backupSelection, equals(BackupSelection.none));
});
});
// Regression tests for the remote_asset dedupe pre-delete: the server may
// re-issue a new id for an existing (ownerId, checksum) pair, which used to
// trip SqliteException(2067) on the partial UNIQUE index.
group('SyncStreamRepository - updateAssetsV1 dedupe (#22522 #27186)', () {
test('replaces stale row when new id arrives with same (ownerId, checksum) and library is null', () async {
await sut.updateUsersV1([_createUser()]);
await sut.updateAssetsV1([_createAsset(id: 'old-id', checksum: 'AAA', fileName: 'photo.jpg')]);
// Server re-issues a new id for the same content (replace-with-upload, immich-go, etc.)
await sut.updateAssetsV1([_createAsset(id: 'new-id', checksum: 'AAA', fileName: 'photo.jpg')]);
// The stale 'old-id' row must be pruned, leaving only the re-issued id.
final rows = await db.remoteAssetEntity.select().get();
expect(rows, hasLength(1));
expect(rows.single.id, equals('new-id'));
expect(rows.single.checksum, equals('AAA'));
});
test('replaces stale row by (ownerId, libraryId, checksum) when library is not null', () async {
await sut.updateUsersV1([_createUser()]);
await sut.updateAssetsV1([
_createAsset(id: 'old-id', checksum: 'AAA', fileName: 'photo.jpg', libraryId: 'lib-1'),
]);
await sut.updateAssetsV1([
_createAsset(id: 'new-id', checksum: 'AAA', fileName: 'photo.jpg', libraryId: 'lib-1'),
]);
// Same dedupe, but matched against the library-scoped partial index.
final rows = await db.remoteAssetEntity.select().get();
expect(rows, hasLength(1));
expect(rows.single.id, equals('new-id'));
expect(rows.single.libraryId, equals('lib-1'));
});
test('library and non-library rows with same (ownerId, checksum) coexist', () async {
await sut.updateUsersV1([_createUser()]);
await sut.updateAssetsV1([
_createAsset(id: 'lib-row', checksum: 'AAA', fileName: 'photo.jpg', libraryId: 'lib-1'),
_createAsset(id: 'main-row', checksum: 'AAA', fileName: 'photo.jpg'),
]);
// The dedupe must NOT cross the NULL/NOT-NULL libraryId boundary.
final rows = await db.remoteAssetEntity.select().get();
expect(rows, hasLength(2), reason: 'library NULL and NOT NULL match different partial indexes');
expect(rows.map((r) => r.id).toSet(), equals({'lib-row', 'main-row'}));
});
test('different owners with same checksum coexist', () async {
await sut.updateUsersV1([_createUser(id: 'user-1')]);
await sut.updateUsersV1([_createUser(id: 'user-2')]);
await sut.updateAssetsV1([
_createAsset(id: 'a-id', checksum: 'AAA', fileName: 'photo.jpg', ownerId: 'user-1'),
_createAsset(id: 'b-id', checksum: 'AAA', fileName: 'photo.jpg', ownerId: 'user-2'),
]);
// Checksum collisions across owners are legal; ownerId is part of the key.
final rows = await db.remoteAssetEntity.select().get();
expect(rows, hasLength(2));
});
test('same id arriving again updates in place (no self-delete)', () async {
await sut.updateUsersV1([_createUser()]);
await sut.updateAssetsV1([_createAsset(id: 'same-id', checksum: 'AAA', fileName: 'photo.jpg')]);
await sut.updateAssetsV1([_createAsset(id: 'same-id', checksum: 'AAA', fileName: 'renamed.jpg')]);
// The pre-delete excludes the incoming id, so the upsert updates in place.
final rows = await db.remoteAssetEntity.select().get();
expect(rows, hasLength(1));
expect(rows.single.id, equals('same-id'));
expect(rows.single.name, equals('renamed.jpg'), reason: 'ON CONFLICT(id) DO UPDATE path still works');
});
test('updateAssetsV2 dedupes the same way', () async {
await sut.updateUsersV1([_createUser()]);
await sut.updateAssetsV2([_createAssetV2(id: 'old-id', checksum: 'AAA', fileName: 'photo.jpg')]);
await sut.updateAssetsV2([_createAssetV2(id: 'new-id', checksum: 'AAA', fileName: 'photo.jpg')]);
// V2 sync stream shares the same dedupe helper as V1.
final rows = await db.remoteAssetEntity.select().get();
expect(rows, hasLength(1));
expect(rows.single.id, equals('new-id'));
});
});
}
+21
View File
@@ -11,6 +11,9 @@
"required": true,
"in": "query",
"description": "Album ID",
"x-nestjs_zod-parent-metadata": {
"description": "Activity search"
},
"schema": {
"format": "uuid",
"pattern": "^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$",
@@ -22,6 +25,9 @@
"required": false,
"in": "query",
"description": "Asset ID (if activity is for an asset)",
"x-nestjs_zod-parent-metadata": {
"description": "Activity search"
},
"schema": {
"format": "uuid",
"pattern": "^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$",
@@ -32,6 +38,9 @@
"name": "level",
"required": false,
"in": "query",
"x-nestjs_zod-parent-metadata": {
"description": "Activity search"
},
"schema": {
"$ref": "#/components/schemas/ReactionLevel"
}
@@ -40,6 +49,9 @@
"name": "type",
"required": false,
"in": "query",
"x-nestjs_zod-parent-metadata": {
"description": "Activity search"
},
"schema": {
"$ref": "#/components/schemas/ReactionType"
}
@@ -49,6 +61,9 @@
"required": false,
"in": "query",
"description": "Filter by user ID",
"x-nestjs_zod-parent-metadata": {
"description": "Activity search"
},
"schema": {
"format": "uuid",
"pattern": "^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$",
@@ -172,6 +187,9 @@
"required": true,
"in": "query",
"description": "Album ID",
"x-nestjs_zod-parent-metadata": {
"description": "Activity"
},
"schema": {
"format": "uuid",
"pattern": "^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$",
@@ -183,6 +201,9 @@
"required": false,
"in": "query",
"description": "Asset ID (if activity is for an asset)",
"x-nestjs_zod-parent-metadata": {
"description": "Activity"
},
"schema": {
"format": "uuid",
"pattern": "^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$",
+7 -5
View File
@@ -36,16 +36,18 @@ const ActivityStatisticsResponseSchema = z
})
.meta({ id: 'ActivityStatisticsResponseDto' });
const ActivitySchema = z.object({
albumId: z.uuidv4().describe('Album ID'),
assetId: z.uuidv4().optional().describe('Asset ID (if activity is for an asset)'),
});
const ActivitySchema = z
.object({
albumId: z.uuidv4().describe('Album ID'),
assetId: z.uuidv4().optional().describe('Asset ID (if activity is for an asset)'),
})
.describe('Activity');
const ActivitySearchSchema = ActivitySchema.extend({
type: ReactionTypeSchema.optional(),
level: ReactionLevelSchema.optional(),
userId: z.uuidv4().optional().describe('Filter by user ID'),
});
}).describe('Activity search');
const ActivityCreateSchema = ActivitySchema.extend({
type: ReactionTypeSchema,