diff --git a/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_postprocessing.dart b/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_postprocessing.dart
index 417b99e8e4..a01017559d 100644
--- a/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_postprocessing.dart
+++ b/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_postprocessing.dart
@@ -1,4 +1,3 @@
-import 'dart:developer' as dev show log;
 import 'dart:math' as math show max, min;
 
 import 'package:photos/services/machine_learning/face_ml/face_detection/detection.dart';
@@ -33,9 +32,6 @@ List<FaceDetectionRelative> yoloOnnxFilterExtractDetections(
         maxScore = result[4];
       }
     }
-    dev.log(
-      'No face detections found above the minScoreSigmoidThreshold of $minScoreSigmoidThreshold. The max score was $maxScore.',
-    );
   }
 
   for (final List<double> rawDetection in output) {
diff --git a/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart b/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
index 728de95dae..7622b011d8 100644
--- a/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
+++ b/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
@@ -66,9 +66,8 @@ class FaceDetectionService extends MlModel {
       maintainAspectRatio: true,
     );
     final preprocessingTime = DateTime.now();
-    _logger.info(
-      'Face detection preprocessing is finished, in ${preprocessingTime.difference(startTime).inMilliseconds} ms',
-    );
+    final preprocessingMs =
+        preprocessingTime.difference(startTime).inMilliseconds;
 
     // Run inference
     List<List<List<double>>>? nestedResults = [];
@@ -81,11 +80,14 @@ class FaceDetectionService extends MlModel {
           inputImageList,
         ); // [1, 25200, 16]
       }
+      final inferenceTime = DateTime.now();
+      final inferenceMs =
+          inferenceTime.difference(preprocessingTime).inMilliseconds;
       _logger.info(
-        'inference is finished, in ${DateTime.now().difference(preprocessingTime).inMilliseconds} ms',
+        'Face detection is finished, in ${inferenceTime.difference(startTime).inMilliseconds} ms (preprocessing: $preprocessingMs ms, inference: $inferenceMs ms)',
      );
    } catch (e, s) {
-      _logger.severe('Error while running inference', e, s);
+      _logger.severe('Error while running inference (PlatformPlugin: ${MlModel.usePlatformPlugin})', e, s);
      throw YOLOFaceInterpreterRunException();
    }
    try {
@@ -121,9 +123,9 @@ class FaceDetectionService extends MlModel {
        outputs[0]?.value as List<List<List<double>>>; // [1, 25200, 16]
    inputOrt.release();
    runOptions.release();
-    outputs.forEach((element) {
+    for (var element in outputs) {
      element?.release();
-    });
+    }
    return result;
  }
 
diff --git a/mobile/lib/services/machine_learning/ml_indexing_isolate.dart b/mobile/lib/services/machine_learning/ml_indexing_isolate.dart
index 6a0d26b45b..91dae6906b 100644
--- a/mobile/lib/services/machine_learning/ml_indexing_isolate.dart
+++ b/mobile/lib/services/machine_learning/ml_indexing_isolate.dart
@@ -51,7 +51,6 @@ class MLIndexingIsolate extends SuperIsolate {
     FileMLInstruction instruction,
     String filePath,
   ) async {
-    final Stopwatch stopwatch = Stopwatch()..start();
 
     late MLResult result;
     try {
@@ -79,15 +78,10 @@ class MLIndexingIsolate extends SuperIsolate {
         s,
       );
       debugPrint(
-        "This image with ID ${instruction.file.uploadedFileID} has name ${instruction.file.displayName}.",
+        "This image with fileID ${instruction.file.uploadedFileID} has name ${instruction.file.displayName}.",
       );
       rethrow;
     }
 
-    stopwatch.stop();
-    _logger.info(
-      "Finished Analyze image with uploadedFileID ${instruction.file.uploadedFileID}, in "
-      "${stopwatch.elapsedMilliseconds} ms (including time waiting for inference engine availability)",
-    );
     return result;
   }
diff --git a/mobile/lib/services/machine_learning/ml_service.dart b/mobile/lib/services/machine_learning/ml_service.dart
index 19ef46f2be..55ce37d37a 100644
--- a/mobile/lib/services/machine_learning/ml_service.dart
+++ b/mobile/lib/services/machine_learning/ml_service.dart
@@ -385,9 +385,6 @@ class MLService {
   }
 
   Future<bool> processImage(FileMLInstruction instruction) async {
-    _logger.info(
-      "`processImage` start processing image with uploadedFileID: ${instruction.file.uploadedFileID}",
-    );
     bool actuallyRanML = false;
 
     try {
@@ -420,7 +417,6 @@ class MLService {
       if (result.facesRan) {
         if (result.faces!.isEmpty) {
           faces.add(Face.empty(result.fileId));
-          _logger.info("no face detected, storing empty for ${result.fileId}");
         }
 
         if (result.faces!.isNotEmpty) {
@@ -432,7 +428,6 @@ class MLService {
             ),
           );
         }
-        _logger.info("storing ${faces.length} faces for ${result.fileId}");
       }
       dataEntity.putFace(
         RemoteFaceEmbedding(
@@ -459,7 +454,7 @@ class MLService {
         instruction.file,
         dataEntity,
       );
-      _logger.info("Results for file ${result.fileId} stored on remote");
+      _logger.info("ML results for fileID ${result.fileId} stored on remote");
       // Storing results locally
       if (result.facesRan) await MLDataDB.instance.bulkInsertFaces(faces);
       if (result.clipRan) {
@@ -467,7 +462,7 @@ class MLService {
           result.clip!,
         );
       }
-      _logger.info("Results for file ${result.fileId} stored locally");
+      _logger.info("ML results for fileID ${result.fileId} stored locally");
       return actuallyRanML;
     } catch (e, s) {
       final String errorString = e.toString();
@@ -480,7 +475,7 @@ class MLService {
           errorString.contains('FileSizeTooLargeForMobileIndexing');
       if (acceptedIssue) {
         _logger.severe(
-          '$errorString with ID ${instruction.file.uploadedFileID} (format $format, type $fileType, size $size), storing empty results so indexing does not get stuck',
+          '$errorString for fileID ${instruction.file.uploadedFileID} (format $format, type $fileType, size $size), storing empty results so indexing does not get stuck',
          e,
          s,
        );
@@ -493,7 +488,7 @@ class MLService {
        return true;
      }
      _logger.severe(
-        "Failed to index file with ID: ${instruction.file.uploadedFileID} (format $format, type $fileType, size $size). Not storing any results locally, which means it will be automatically retried later.",
+        "Failed to index file for fileID ${instruction.file.uploadedFileID} (format $format, type $fileType, size $size). Not storing any results locally, which means it will be automatically retried later.",
        e,
        s,
      );
diff --git a/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart b/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
index 193e9bac14..24dd7e7bba 100644
--- a/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
+++ b/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
@@ -40,16 +40,25 @@ class ClipImageEncoder extends MlModel {
     final preprocessingMs =
         preprocessingTime.difference(startTime).inMilliseconds;
     late List<double> result;
-    if (MlModel.usePlatformPlugin) {
-      result = await _runPlatformPluginPredict(inputList);
-    } else {
-      result = _runFFIBasedPredict(inputList, sessionAddress);
+    try {
+      if (MlModel.usePlatformPlugin) {
+        result = await _runPlatformPluginPredict(inputList);
+      } else {
+        result = _runFFIBasedPredict(inputList, sessionAddress);
+      }
+    } catch (e, stackTrace) {
+      _logger.severe(
+        "Clip image inference failed${enteFileID != null ? " with fileID $enteFileID" : ""} (PlatformPlugin: ${MlModel.usePlatformPlugin})",
+        e,
+        stackTrace,
+      );
+      rethrow;
     }
     final inferTime = DateTime.now();
     final inferenceMs = inferTime.difference(preprocessingTime).inMilliseconds;
     final totalMs = inferTime.difference(startTime).inMilliseconds;
     _logger.info(
-      "Clip predict took $totalMs ms${enteFileID != null ? " with fileID $enteFileID" : ""} (nference: $inferenceMs ms, preprocessing: $preprocessingMs ms)",
+      "Clip image predict took $totalMs ms${enteFileID != null ? " with fileID $enteFileID" : ""} (inference: $inferenceMs ms, preprocessing: $preprocessingMs ms)",
     );
     return result;
   }
diff --git a/mobile/lib/services/machine_learning/semantic_search/clip/clip_text_encoder.dart b/mobile/lib/services/machine_learning/semantic_search/clip/clip_text_encoder.dart
index cd59fd1aa5..8a50b785f1 100644
--- a/mobile/lib/services/machine_learning/semantic_search/clip/clip_text_encoder.dart
+++ b/mobile/lib/services/machine_learning/semantic_search/clip/clip_text_encoder.dart
@@ -36,10 +36,19 @@ class ClipTextEncoder extends MlModel {
     final address = args["address"] as int;
     final List<int> tokenize = await ClipTextTokenizer.instance.tokenize(text);
     final int32list = Int32List.fromList(tokenize);
-    if (MlModel.usePlatformPlugin) {
-      return await _runPlatformPluginPredict(int32list);
-    } else {
-      return _runFFIBasedPredict(int32list, address);
+    try {
+      if (MlModel.usePlatformPlugin) {
+        return await _runPlatformPluginPredict(int32list);
+      } else {
+        return _runFFIBasedPredict(int32list, address);
+      }
+    } catch (e, s) {
+      _logger.severe(
+        "Clip text inference failed (PlatformPlugin: ${MlModel.usePlatformPlugin})",
+        e,
+        s,
+      );
+      rethrow;
     }
   }
 
diff --git a/mobile/lib/utils/image_ml_util.dart b/mobile/lib/utils/image_ml_util.dart
index 0de42f0c98..9b68ca9b3a 100644
--- a/mobile/lib/utils/image_ml_util.dart
+++ b/mobile/lib/utils/image_ml_util.dart
@@ -259,7 +259,6 @@ Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
   int width = 112,
   int height = 112,
 }) async {
-  final stopwatch = Stopwatch()..start();
   final Size originalSize =
       Size(image.width.toDouble(), image.height.toDouble());
 
@@ -299,31 +298,20 @@ Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
       alignedImageIndex,
     );
 
-    final blurDetectionStopwatch = Stopwatch()..start();
     final faceGrayMatrix = _createGrayscaleIntMatrixFromNormalized2List(
       alignedImagesFloat32List,
       alignedImageIndex,
     );
     alignedImageIndex += 3 * width * height;
-    final grayscalems = blurDetectionStopwatch.elapsedMilliseconds;
-    log('creating grayscale matrix took $grayscalems ms');
     final (isBlur, blurValue) =
         await BlurDetectionService.predictIsBlurGrayLaplacian(
       faceGrayMatrix,
       faceDirection: face.getFaceDirection(),
     );
-    final blurms = blurDetectionStopwatch.elapsedMilliseconds - grayscalems;
-    log('blur detection took $blurms ms');
-    log(
-      'total blur detection took ${blurDetectionStopwatch.elapsedMilliseconds} ms',
-    );
-    blurDetectionStopwatch.stop();
     isBlurs.add(isBlur);
     blurValues.add(blurValue);
   }
-  stopwatch.stop();
-  log("Face Alignment took: ${stopwatch.elapsedMilliseconds} ms");
 
   return (
     alignedImagesFloat32List,
     alignmentResults,
diff --git a/mobile/lib/utils/ml_util.dart b/mobile/lib/utils/ml_util.dart
index 817c0bd4ea..d43a3ef7d3 100644
--- a/mobile/lib/utils/ml_util.dart
+++ b/mobile/lib/utils/ml_util.dart
@@ -332,7 +332,7 @@ Future<String> getImagePathForML(EnteFile enteFile) async {
   imagePath = file?.path;
   stopwatch.stop();
   _logger.info(
-    "Getting file data for uploadedFileID ${enteFile.uploadedFileID} took ${stopwatch.elapsedMilliseconds} ms",
+    "Getting file data for fileID ${enteFile.uploadedFileID} took ${stopwatch.elapsedMilliseconds} ms",
   );
 
   if (imagePath == null) {
@@ -388,7 +388,7 @@ Future<MLResult> analyzeImageStatic(Map args) async {
   final int clipImageAddress = args["clipImageAddress"] as int;
 
   _logger.info(
-    "Start analyzeImageStatic with fileID: $enteFileID (runFaces: $runFaces, runClip: $runClip)",
+    "Start analyzeImageStatic for fileID $enteFileID (runFaces: $runFaces, runClip: $runClip)",
   );
   final startTime = DateTime.now();
 
@@ -399,9 +399,7 @@ Future<MLResult> analyzeImageStatic(Map args) async {
   final result = MLResult.fromEnteFileID(enteFileID);
   result.decodedImageSize = decodedImageSize;
   final decodeTime = DateTime.now();
-  _logger.info(
-    'Reading and decoding image took ${decodeTime.difference(startTime).inMilliseconds} ms (fileID: $enteFileID)',
-  );
+  final decodeMs = decodeTime.difference(startTime).inMilliseconds;
 
   if (runFaces) {
     final resultFaces = await FaceRecognitionService.runFacesPipeline(
@@ -417,6 +415,9 @@ Future<MLResult> analyzeImageStatic(Map args) async {
       result.faces = resultFaces;
     }
   }
+  final facesTime = DateTime.now();
+  final facesMs = facesTime.difference(decodeTime).inMilliseconds;
+  final faceMsString = runFaces ? ", faces: $facesMs ms" : "";
 
   if (runClip) {
     final clipResult = await SemanticSearchService.runClipImage(
@@ -427,10 +428,14 @@ Future<MLResult> analyzeImageStatic(Map args) async {
     );
     result.clip = clipResult;
   }
+  final clipTime = DateTime.now();
+  final clipMs = clipTime.difference(facesTime).inMilliseconds;
+  final clipMsString = runClip ? ", clip: $clipMs ms" : "";
 
   final endTime = DateTime.now();
+  final totalMs = endTime.difference(startTime).inMilliseconds;
   _logger.info(
-    'Finished analyzeImageStatic with fileID: $enteFileID, in ${endTime.difference(startTime).inMilliseconds} ms',
+    'Finished analyzeImageStatic for fileID $enteFileID, in $totalMs ms (decode: $decodeMs ms$faceMsString$clipMsString)',
   );
 
   return result;