Compare commits

...

1 Commit

Author SHA1 Message Date
Santo Shakil 8c7bd28864 fix(mobile): run iOS bg task phases in parallel
onIosUpload runs sync local, sync remote, hash and handle backup
sequentially. on the bg refresh task path that's a 20s budget from
iOS, and sync + hash usually eat all of it before backup gets a turn
to enqueue any candidates.

these phases don't actually depend on each other. local + remote sync
touch different tables. hash works off whatever's already in drift.
handle backup reads candidates and just enqueues to URLSession bg.
anything one phase produces during this run only becomes visible to the
others on the next run, and server-side dedup catches the rare race where
backup enqueues an asset that sync remote was about to mark as already
uploaded.

so this runs all four phases concurrently via Future.wait, with hash
getting the full maxSeconds-1 budget instead of a fixed 5s. the outer
budget timeout still caps everything before iOS terminates the task.

second small change: getAssetsToHash orders by createdAt DESC instead
of id ASC to match getCandidates. when hash runs inside a refresh
fire it processes recent photos first.
2026-05-08 16:42:03 +06:00
2 changed files with 21 additions and 7 deletions
@@ -128,17 +128,31 @@ class BackgroundWorkerBgService extends BackgroundWorkerFlutterApi {
_logger.info('iOS background upload started with maxSeconds: ${maxSeconds}s');
final sw = Stopwatch()..start();
try {
final timeout = isRefresh ? const Duration(seconds: 5) : Duration(minutes: _isBackupEnabled ? 3 : 6);
if (!await _syncAssets(hashTimeout: timeout)) {
_logger.warning("Remote sync did not complete successfully, skipping backup");
final hashTimeout = isRefresh
? Duration(seconds: (maxSeconds ?? 20) - 1)
: Duration(minutes: _isBackupEnabled ? 3 : 6);
final budget = maxSeconds != null ? Duration(seconds: maxSeconds - 1) : null;
final sync = _ref?.read(backgroundSyncProvider);
if (sync == null) {
return;
}
// Run sync local, sync remote, hash and backup concurrently so the bg
// refresh task (20s budget) can make progress on all four instead of
// racing them sequentially. Phases are independent at the data layer:
// hash and handle_backup read drift state and tolerate stale reads
// (server-side dedup catches the rare race).
final localFuture = sync.syncLocal();
final remoteFuture = sync.syncRemote();
final hashFuture = sync.hashAssets().timeout(hashTimeout, onTimeout: () {});
final backupFuture = _handleBackup();
if (maxSeconds != null) {
await backupFuture.timeout(Duration(seconds: maxSeconds - 1), onTimeout: () {});
final all = Future.wait<dynamic>([localFuture, remoteFuture, hashFuture, backupFuture]);
if (budget != null) {
await all.timeout(budget, onTimeout: () => <dynamic>[]);
} else {
await backupFuture;
await all;
}
} catch (error, stack) {
_logger.severe("Failed to complete iOS background upload", error, stack);
@@ -241,7 +241,7 @@ class DriftLocalAlbumRepository extends DriftDatabaseRepository {
innerJoin(_db.localAssetEntity, _db.localAlbumAssetEntity.assetId.equalsExp(_db.localAssetEntity.id)),
])
..where(_db.localAlbumAssetEntity.albumId.equals(albumId) & _db.localAssetEntity.checksum.isNull())
..orderBy([OrderingTerm.asc(_db.localAssetEntity.id)]);
..orderBy([OrderingTerm.desc(_db.localAssetEntity.createdAt)]);
return query.map((row) => row.readTable(_db.localAssetEntity).toDto()).get();
}