From 40d4b21be98112a7eb0bf46325a8e9f2877fcaf4 Mon Sep 17 00:00:00 2001
From: Prateek Sunal
Date: Tue, 19 Aug 2025 14:10:12 +0530
Subject: [PATCH] chore: add heavy logging

---
 .../machine_learning/compute_controller.dart  |  1 +
 .../face_detection_service.dart               | 26 +++++++++++++++++++
 .../face_ml/face_recognition_service.dart     | 19 ++++++++++++++
 .../clip/clip_image_encoder.dart              |  3 +++
 .../semantic_search_service.dart              |  7 +++++
 .../services/sync/remote_sync_service.dart    | 11 +++++---
 .../apps/photos/lib/utils/image_ml_util.dart  | 15 +++++++++++++++
 .../lib/utils/isolate/super_isolate.dart      | 17 +++++-------
 mobile/apps/photos/lib/utils/ml_util.dart     | 10 +++++++
 9 files changed, 94 insertions(+), 15 deletions(-)

diff --git a/mobile/apps/photos/lib/services/machine_learning/compute_controller.dart b/mobile/apps/photos/lib/services/machine_learning/compute_controller.dart
index ec8787eaaf..54a9801a14 100644
--- a/mobile/apps/photos/lib/services/machine_learning/compute_controller.dart
+++ b/mobile/apps/photos/lib/services/machine_learning/compute_controller.dart
@@ -163,6 +163,7 @@ class ComputeController {
   }
 
   void _fireControlEvent() {
+    return; // TODO: remove; temporarily disables compute-availability events while debugging
     final shouldRunCompute = _isDeviceHealthy && _canRunGivenUserInteraction();
     if (shouldRunCompute != _canRunCompute) {
       _canRunCompute = shouldRunCompute;
diff --git a/mobile/apps/photos/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart b/mobile/apps/photos/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
index 2fc7c82b47..bb3b50c6b0 100644
--- a/mobile/apps/photos/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
+++ b/mobile/apps/photos/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
@@ -54,12 +54,19 @@ class FaceDetectionService extends MlModel {
       'sessionAddress should be valid',
     );
 
+    _logger.info(
+      "Running face detection for image with size ${dimensions.width}x${dimensions.height}",
+    );
+
     final startTime = DateTime.now();
 
     final (inputImageList, scaledSize) = await preprocessImageYoloFace(
       dimensions,
       rawRgbaBytes,
     );
+    _logger.info(
+      "Preprocessed image to input list of size ${inputImageList.length} with scaled size $scaledSize",
+    );
     final preprocessingTime = DateTime.now();
     final preprocessingMs =
         preprocessingTime.difference(startTime).inMilliseconds;
@@ -68,8 +75,14 @@ class FaceDetectionService extends MlModel {
     List<List<List<double>>>? nestedResults = [];
     try {
       if (MlModel.usePlatformPlugin) {
+        _logger.info(
+          "Running inference using platform plugin",
+        );
         nestedResults = await _runPlatformPluginPredict(inputImageList);
       } else {
+        _logger.info(
+          "Running inference using ONNX runtime",
+        );
         nestedResults = _runFFIBasedPredict(
           sessionAddress,
           inputImageList,
@@ -116,9 +129,15 @@ class FaceDetectionService extends MlModel {
     final inputs = {'input': inputOrt};
     final runOptions = OrtRunOptions();
     final session = OrtSession.fromAddress(sessionAddress);
+    _logger.info(
+      "Running face detection using ONNX runtime with input size ${inputImageList.length}",
+    );
     final List<OrtValue?> outputs = session.run(runOptions, inputs);
     final result =
         outputs[0]?.value as List<List<List<double>>>; // [1, 25200, 16]
+    _logger.info(
+      "Finished running face detection using ONNX runtime",
+    );
     inputOrt.release();
     runOptions.release();
     for (var element in outputs) {
@@ -132,11 +151,18 @@ class FaceDetectionService extends MlModel {
     Float32List inputImageList,
   ) async {
     final OnnxDart plugin = OnnxDart();
+    _logger.info(
+      "Running face detection using OnnxDart plugin with input size ${inputImageList.length}",
+    );
     final result = await plugin.predict(
       inputImageList,
       _modelName,
     );
 
+    _logger.info(
+      "Finished running face detection using OnnxDart plugin",
+    );
+
     final int resultLength = result!.length;
     assert(resultLength % 25200 * 16 == 0);
     const int outerLength = 1;
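The platform-plugin hunk above cuts off after `const int outerLength = 1;`, so the reshape of the flat plugin output is not shown. Judging by the FFI path's cast (`as List<List<List<double>>>; // [1, 25200, 16]`), the flat `Float32List` is presumably unflattened into that same shape. Below is a minimal sketch of such a reshape; the helper name is hypothetical. Note also that the pre-existing `assert(resultLength % 25200 * 16 == 0)` parses as `(resultLength % 25200) * 16 == 0`, because `%` and `*` share precedence, so the parenthesized form in the sketch is the stricter check likely intended:

```dart
import 'dart:typed_data';

// Sketch only, not part of the patch: unflatten the OnnxDart output into the
// [1, 25200, 16] nested list that the FFI path receives directly.
List<List<List<double>>> reshapeYoloFaceOutput(Float32List flat) {
  const int anchors = 25200; // detection rows per image
  const int fields = 16; // values per row (assumed: box, score, landmarks)
  // Stricter than the existing assert: require divisibility by 25200 * 16.
  assert(flat.length % (anchors * fields) == 0);
  return [
    List<List<double>>.generate(
      anchors,
      (i) => List<double>.generate(fields, (j) => flat[i * fields + j]),
    ),
  ];
}
```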
diff --git a/mobile/apps/photos/lib/services/machine_learning/face_ml/face_recognition_service.dart b/mobile/apps/photos/lib/services/machine_learning/face_ml/face_recognition_service.dart
index f500157fcf..d81e8e2f51 100644
--- a/mobile/apps/photos/lib/services/machine_learning/face_ml/face_recognition_service.dart
+++ b/mobile/apps/photos/lib/services/machine_learning/face_ml/face_recognition_service.dart
@@ -81,6 +81,9 @@ class FaceRecognitionService {
     final faceResults = [];
 
     final startTime = DateTime.now();
+    _logger.info(
+      "Starting runFacesPipeline with fileID $enteFileID",
+    );
 
     // Get the faces
     final List faceDetectionResult = await _detectFacesSync(
@@ -90,6 +93,9 @@ class FaceRecognitionService {
       faceDetectionAddress,
       faceResults,
     );
+    _logger.info(
+      "Detected ${faceDetectionResult.length} faces in image with fileID $enteFileID",
+    );
     final detectFacesTime = DateTime.now();
     final detectFacesMs =
         detectFacesTime.difference(startTime).inMilliseconds;
@@ -101,6 +107,9 @@ class FaceRecognitionService {
       return [];
     }
 
+    _logger.info(
+      "Detected ${faceDetectionResult.length} faces, proceeding to alignment and embedding",
+    );
     // Align the faces
     final Float32List faceAlignmentResult = await _alignFacesSync(
       dim,
@@ -112,6 +121,9 @@ class FaceRecognitionService {
     final alignFacesMs =
         alignFacesTime.difference(detectFacesTime).inMilliseconds;
 
+    _logger.info(
+      "Aligned ${faceDetectionResult.length} faces in image with fileID $enteFileID",
+    );
     // Get the embeddings of the faces
     await _embedFacesSync(
       faceAlignmentResult,
@@ -139,6 +151,9 @@ class FaceRecognitionService {
     List faceResults,
   ) async {
     try {
+      _logger.info(
+        "Running face detection for fileID $fileID with interpreter at $interpreterAddress",
+      );
       // Get the bounding boxes of the faces
       final List faces =
           await FaceDetectionService.predict(
@@ -147,6 +162,10 @@ class FaceRecognitionService {
         interpreterAddress,
       );
 
+      _logger.info(
+        "Detected ${faces.length} faces in image with fileID $fileID",
+      );
+
       // Add detected faces to the faceResults
       for (var i = 0; i < faces.length; i++) {
         faceResults.add(
diff --git a/mobile/apps/photos/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart b/mobile/apps/photos/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
index 88ff7f691c..8d6fdf56c8 100644
--- a/mobile/apps/photos/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
+++ b/mobile/apps/photos/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
@@ -86,6 +86,9 @@ class ClipImageEncoder extends MlModel {
     Float32List inputList,
   ) async {
     final OnnxDart plugin = OnnxDart();
+    _logger.info(
+      "Running Clip image predict using OnnxDart plugin with input size ${inputList.length}",
+    );
     final result = await plugin.predict(
       inputList,
       _modelName,
diff --git a/mobile/apps/photos/lib/services/machine_learning/semantic_search/semantic_search_service.dart b/mobile/apps/photos/lib/services/machine_learning/semantic_search/semantic_search_service.dart
index 0977c7e534..1ab8d6da5d 100644
--- a/mobile/apps/photos/lib/services/machine_learning/semantic_search/semantic_search_service.dart
+++ b/mobile/apps/photos/lib/services/machine_learning/semantic_search/semantic_search_service.dart
@@ -307,6 +307,9 @@ class SemanticSearchService {
     Uint8List rawRgbaBytes,
     int clipImageAddress,
   ) async {
+    _logger.info(
+      "Running Clip image encoding for file ID: $enteFileID",
+    );
     final embedding = await ClipImageEncoder.predict(
       dimensions,
       rawRgbaBytes,
@@ -314,6 +317,10 @@ class SemanticSearchService {
       enteFileID,
     );
 
+    _logger.info(
+      "Clip image encoding completed for file ID: $enteFileID",
+    );
+
     final clipResult = ClipResult(fileID: enteFileID, embedding: embedding);
 
     return clipResult;
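The two CLIP logs above mark start and completion but, unlike the face pipeline, record no elapsed milliseconds. If per-file timing is wanted here as well, a small sketch using only the call shape visible in the hunk (a `Stopwatch` from `dart:core`; variable names mirror the diff):

```dart
// Sketch only: time the CLIP image encoding the way the face pipeline
// already reports per-stage milliseconds.
final stopwatch = Stopwatch()..start();
final embedding = await ClipImageEncoder.predict(
  dimensions,
  rawRgbaBytes,
  clipImageAddress,
  enteFileID,
);
_logger.info(
  "Clip image encoding completed for file ID: $enteFileID "
  "in ${stopwatch.elapsedMilliseconds} ms",
);
```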
diff --git a/mobile/apps/photos/lib/services/sync/remote_sync_service.dart b/mobile/apps/photos/lib/services/sync/remote_sync_service.dart
index 9e2f7f6aed..a227e63e80 100644
--- a/mobile/apps/photos/lib/services/sync/remote_sync_service.dart
+++ b/mobile/apps/photos/lib/services/sync/remote_sync_service.dart
@@ -95,6 +95,10 @@ class RemoteSyncService {
   }
 
   Future sync({bool silently = false}) async {
+    // TODO: remove
+    // if (!isProcessBg) {
+    //   return;
+    // }
     if (!_config.hasConfiguredAccount()) {
       _logger.info("Skipping remote sync since account is not configured");
       return;
@@ -377,10 +381,9 @@ class RemoteSyncService {
     localIDsToSync.removeAll(alreadyClaimedLocalIDs);
     if (alreadyClaimedLocalIDs.isNotEmpty && !_hasCleanupStaleEntry) {
       try {
-      await _db.removeQueuedLocalFiles(alreadyClaimedLocalIDs);
-      } catch(e, s) {
-        _logger.severe("removeQueuedLocalFiles failed",e,s);
-
+        await _db.removeQueuedLocalFiles(alreadyClaimedLocalIDs);
+      } catch (e, s) {
+        _logger.severe("removeQueuedLocalFiles failed", e, s);
       }
     }
   }
diff --git a/mobile/apps/photos/lib/utils/image_ml_util.dart b/mobile/apps/photos/lib/utils/image_ml_util.dart
index cac6a45ed4..49d95f8bfe 100644
--- a/mobile/apps/photos/lib/utils/image_ml_util.dart
+++ b/mobile/apps/photos/lib/utils/image_ml_util.dart
@@ -83,11 +83,17 @@ Future decodeImageFromPath(
         "Failed to decode image from file: $imagePath using image package",
       );
     }
     final bytes = imageData.getBytes(order: img_pkg.ChannelOrder.rgba);
+
+    for (final order in img_pkg.ChannelOrder.values) {
+      final orderBytes = imageData.getBytes(order: order);
+      _logger.info("Bytes length is: ${orderBytes.length}, for order: $order");
+    }
     final dimensions = Dimensions(
       width: image!.width,
       height: image.height,
     );
+    _logger.info("Dimensions are: $dimensions");
     return DecodedImage(
       dimensions: dimensions,
       rawRgbaBytes: bytes,
@@ -111,6 +117,9 @@ Future decodeImageFromPath(
   }
 
   late Image image;
+  _logger.info(
+    'Decoding image at path: $imagePath, format: $format, includeRgbaBytes: $includeRgbaBytes',
+  );
   try {
     image = await decodeImageFromData(imageData);
   } catch (e, s) {
@@ -144,12 +153,18 @@ Future decodeImageFromPath(
       );
     }
   }
+  _logger.info(
+    "Decoded image at path: $imagePath [i]",
+  );
   if (!includeRgbaBytes) {
     return DecodedImage(
       dimensions: Dimensions(width: image.width, height: image.height),
       image: includeDartUiImage ? image : null,
     );
   }
+  _logger.info(
+    "Getting Raw RGBA",
+  );
   final rawRgbaBytes = await _getRawRgbaBytes(image);
   return DecodedImage(
     dimensions: Dimensions(width: image.width, height: image.height),
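To interpret the per-order log lines added above: for 8-bit images, `getBytes(order: ...)` emits one byte per channel per pixel, so the logged length divided by the pixel count recovers the channel count of each order (4 for the RGBA-family orders, 3 for RGB/BGR). A sketch of that arithmetic, assuming `imageData` is the 8-bit decoded `package:image` image from the hunk above:

```dart
// Sketch only: recover channels-per-pixel from a logged byte length,
// e.g. for an `orderBytes` captured inside the loop added above.
final int pixels = imageData.width * imageData.height;
final int channels = orderBytes.length ~/ pixels; // 4 for rgba/bgra, 3 for rgb/bgr
```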
diff --git a/mobile/apps/photos/lib/utils/isolate/super_isolate.dart b/mobile/apps/photos/lib/utils/isolate/super_isolate.dart
index f1bae4dfaf..2534133eb8 100644
--- a/mobile/apps/photos/lib/utils/isolate/super_isolate.dart
+++ b/mobile/apps/photos/lib/utils/isolate/super_isolate.dart
@@ -125,11 +125,11 @@ abstract class SuperIsolate {
       final answerPort = ReceivePort();
       _activeTasks++;
-      // TODO: remove it after we're done
-      logger.info("new activity ${operation.name}, activeTasks $_activeTasks");
 
       final taskID = newIsolateTaskID(operation.name);
       _mainSendPort.send([taskID, operation.index, args, answerPort.sendPort]);
+      logger.info("Activity ${operation.name} started");
+
       answerPort.listen((receivedMessage) {
         if (receivedMessage['taskID'] != taskID) {
           logger.severe("Received isolate message with wrong taskID");
@@ -138,6 +138,7 @@ abstract class SuperIsolate {
         final logs = receivedMessage['logs'] as List;
         IsolateLogger.handLogStringsToMainLogger(logs);
         final data = receivedMessage['data'];
+
         if (data is Map && data.containsKey('error')) {
           // Handle the error
           final errorMessage = data['error'];
@@ -145,19 +146,13 @@ abstract class SuperIsolate {
           final exception = Exception(errorMessage);
           final stackTrace = StackTrace.fromString(errorStackTrace);
           completer.completeError(exception, stackTrace);
-          logger.warning(
-            "operation ${operation.name}, completed with warnings",
-            errorMessage,
-            errorStackTrace,
-          );
+          logger.severe("Activity ${operation.name} failed");
         } else {
           completer.complete(data);
-          logger.info("operation ${operation.name}, completed successfully");
+          logger.info("Activity ${operation.name} completed");
         }
+        _activeTasks--;
       });
-      _activeTasks--;
-      // TODO: remove it after we're done
-      logger.info("new activity ${operation.name}, activeTasks $_activeTasks");
 
       return completer.future;
     });
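The substantive change in `super_isolate.dart` is not the log wording but the placement of `_activeTasks--`: previously the counter was decremented synchronously, before the isolate had replied, so an idle check could observe zero active tasks while work was still in flight. Decrementing inside the listener ties the counter to actual task completion. An equivalent formulation, sketched under the assumption that `completer.future` completes exactly once per task:

```dart
// Sketch only: guarantee the counter drops exactly once per task,
// whether the future completes with a value or an error.
completer.future.whenComplete(() => _activeTasks--);
```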
diff --git a/mobile/apps/photos/lib/utils/ml_util.dart b/mobile/apps/photos/lib/utils/ml_util.dart
index c1d2f972ed..2f79fd4231 100644
--- a/mobile/apps/photos/lib/utils/ml_util.dart
+++ b/mobile/apps/photos/lib/utils/ml_util.dart
@@ -7,6 +7,7 @@ import "package:photos/db/files_db.dart";
 import "package:photos/db/ml/db.dart";
 import "package:photos/db/ml/filedata.dart";
 import "package:photos/extensions/list.dart";
+import "package:photos/main.dart";
 import "package:photos/models/file/extensions/file_props.dart";
 import "package:photos/models/file/file.dart";
 import "package:photos/models/file/file_type.dart";
@@ -411,19 +412,28 @@ Future analyzeImageStatic(Map args) async {
   );
 
   final startTime = DateTime.now();
+  _logger.info("Decoding image at path: $imagePath");
   // Decode the image once to use for both face detection and alignment
   final decodedImage = await decodeImageFromPath(
     imagePath,
     includeRgbaBytes: true,
     includeDartUiImage: false,
+    inBackground: isProcessBg,
   );
   final rawRgbaBytes = decodedImage.rawRgbaBytes!;
   final imageDimensions = decodedImage.dimensions;
+  _logger.info(
+    "Decoded image with rgbaLength: ${rawRgbaBytes.length}, dimensions: $imageDimensions",
+  );
 
   final result = MLResult.fromEnteFileID(enteFileID);
   result.decodedImageSize = imageDimensions;
 
   final decodeTime = DateTime.now();
   final decodeMs = decodeTime.difference(startTime).inMilliseconds;
+  _logger.info(
+    "Decoded image at path: $imagePath, in $decodeMs ms",
+  );
+
   String faceMsString = "", clipMsString = "";
   final pipelines = await Future.wait([
     runFaces