Compare commits

...

9 Commits
main ... bg-ml

Author          SHA1         Message                                                                  Date
Prateek Sunal   40d4b21be9   chore: add heavy logging                                                 2025-08-19 14:10:12 +05:30
Prateek Sunal   7027edb2d1   Merge remote-tracking branch 'origin/main' into bg-ml                    2025-08-19 12:48:27 +05:30
Prateek Sunal   831720a49d   Merge remote-tracking branch 'origin/decoded_image_refactor' into bg-ml  2025-08-19 12:48:20 +05:30
laurenspriem    24b88a186c   Decode image in background using image pkg                               2025-08-19 11:05:39 +05:30
laurenspriem    49b9d83f05   refactor to not use ui.image unnecessarily everywhere                    2025-08-19 10:52:59 +05:30
laurenspriem    273d7bd00a   ignore rust_builder linting errors                                       2025-08-19 10:38:08 +05:30
laurenspriem    4e8991dc10   remove unused import                                                     2025-08-19 10:25:42 +05:30
Prateek Sunal   ecf58c175b   fix: add more things for testing                                         2025-08-13 19:51:05 +05:30
Prateek Sunal   cc0198f879   fix: add more logs for testing                                           2025-08-13 14:37:23 +05:30
13 changed files with 321 additions and 139 deletions

View File

@@ -22,7 +22,6 @@ linter:
     - use_key_in_widget_constructors
     - cancel_subscriptions
     - avoid_empty_else
     - exhaustive_cases
@@ -63,7 +62,6 @@ analyzer:
     unrelated_type_equality_checks: error
     unnecessary_cast: info
     unawaited_futures: warning # convert to warning after fixing existing issues
     invalid_dependency: info
     use_build_context_synchronously: ignore # experimental lint, requires many changes
@@ -74,3 +72,4 @@ analyzer:
   exclude:
     - thirdparty/**
     - lib/generated/**
+    - rust_builder/**

View File

@@ -185,10 +185,13 @@ Future<void> _runMinimally(String taskId, TimeLogger tlog) async {
   // only runs for android
   await _homeWidgetSync(true);
-  // await MLService.instance.init();
-  // await PersonService.init(entityService, MLDataDB.instance, prefs);
-  // await MLService.instance.runAllML(force: true);
-  await smartAlbumsService.syncSmartAlbums();
+  final isDeviceHealthy = await computeController.isDeviceHealthyFuture();
+  if (isDeviceHealthy) {
+    await MLService.instance.init();
+    await PersonService.init(entityService, MLDataDB.instance, prefs);
+    await MLService.instance.runAllML(force: true);
+    await smartAlbumsService.syncSmartAlbums();
+  }
 }
 
 Future<void> _init(bool isBackground, {String via = ''}) async {
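For context, the reworked _runMinimally gates all background ML behind a one-shot health check instead of the foreground listener machinery. A minimal sketch of the pattern, with hypothetical checkDeviceHealth/runMlTasks stand-ins for computeController.isDeviceHealthyFuture() and the ML calls above:

// Hypothetical stand-ins for computeController.isDeviceHealthyFuture()
// and the ML init/run calls in _runMinimally.
Future<bool> checkDeviceHealth() async => true;
Future<void> runMlTasks() async {}

Future<void> runMinimallySketch() async {
  // One-shot health snapshot: the background isolate is short-lived,
  // so polling once replaces the foreground battery/thermal listeners.
  final isDeviceHealthy = await checkDeviceHealth();
  if (!isDeviceHealthy) return; // skip ML entirely on an unhealthy device
  await runMlTasks(); // init + runAllML + smart album sync, sequentially
}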

View File

@@ -8,6 +8,7 @@ import "package:flutter/foundation.dart";
 import "package:logging/logging.dart";
 import "package:photos/core/event_bus.dart";
 import "package:photos/events/compute_control_event.dart";
+import "package:photos/main.dart";
 import "package:thermal/thermal.dart";
 
 enum _ComputeRunState {
@@ -42,6 +43,13 @@ class ComputeController {
   ComputeController() {
     _logger.info('ComputeController constructor');
+    // we don't need listeners to be initialized in background
+    if (isProcessBg) {
+      _logger.info('init done ');
+      return;
+    }
     _startInteractionTimer(kDefaultInteractionTimeout);
     if (Platform.isIOS) {
       if (kDebugMode) {
@@ -71,6 +79,8 @@ class ComputeController {
   }
 
   bool requestCompute({bool ml = false, bool stream = false}) {
+    // TODO: Remove after testing
+    return false;
     _logger.info("Requesting compute: ml: $ml, stream: $stream");
     if (!_isDeviceHealthy || !_canRunGivenUserInteraction()) {
       _logger.info("Device not healthy or user interacting, denying request.");
@@ -153,6 +163,7 @@ class ComputeController {
   }
 
   void _fireControlEvent() {
+    return;
     final shouldRunCompute = _isDeviceHealthy && _canRunGivenUserInteraction();
     if (shouldRunCompute != _canRunCompute) {
       _canRunCompute = shouldRunCompute;
@@ -175,6 +186,25 @@ class ComputeController {
     _startInteractionTimer(kDefaultInteractionTimeout);
   }
 
+  Future<bool> isDeviceHealthyFuture() async {
+    if (!isProcessBg) return isDeviceHealthy;
+    // Update Thermal status
+    _lastThermalStatus = await _thermal.thermalStatus;
+    // Update Battery info and device health
+    if (Platform.isIOS) {
+      _iosLastBatteryInfo = await BatteryInfoPlugin().iosBatteryInfo;
+      _isDeviceHealthy = _computeIsiOSDeviceHealthy();
+    } else {
+      _androidLastBatteryInfo = await BatteryInfoPlugin().androidBatteryInfo;
+      _isDeviceHealthy = _computeIsAndroidDeviceHealthy();
+    }
+    _logger.info("Device health status: $_isDeviceHealthy");
+    return _isDeviceHealthy;
+  }
+
   void _onAndroidBatteryStateUpdate(AndroidBatteryInfo? batteryInfo) {
     _androidLastBatteryInfo = batteryInfo;
     _logger.info("Battery info: ${batteryInfo!.toJson()}");

View File

@@ -1,6 +1,5 @@
 import "dart:async";
 import 'dart:typed_data' show Float32List, Uint8List;
-import 'dart:ui' as ui show Image;
 
 import 'package:logging/logging.dart';
 import "package:onnx_dart/onnx_dart.dart";
@@ -44,7 +43,7 @@ class FaceDetectionService extends MlModel {
   /// Detects faces in the given image data.
   static Future<List<FaceDetectionRelative>> predict(
-    ui.Image image,
+    Dimensions dimensions,
     Uint8List rawRgbaBytes,
     int sessionAddress,
   ) async {
@@ -55,12 +54,19 @@ class FaceDetectionService extends MlModel {
       'sessionAddress should be valid',
     );
 
+    _logger.info(
+      "Running face detection for image with size ${dimensions.width}x${dimensions.height}",
+    );
     final startTime = DateTime.now();
     final (inputImageList, scaledSize) = await preprocessImageYoloFace(
-      image,
+      dimensions,
       rawRgbaBytes,
     );
+    _logger.info(
+      "Preprocessed image to input list of size ${inputImageList.length} with scaled size $scaledSize",
+    );
     final preprocessingTime = DateTime.now();
     final preprocessingMs =
         preprocessingTime.difference(startTime).inMilliseconds;
@@ -69,8 +75,14 @@ class FaceDetectionService extends MlModel {
     List<List<List<double>>>? nestedResults = [];
     try {
       if (MlModel.usePlatformPlugin) {
+        _logger.info(
+          "Running inference using platform plugin",
+        );
         nestedResults = await _runPlatformPluginPredict(inputImageList);
       } else {
+        _logger.info(
+          "Running inference using ONNX runtime",
+        );
         nestedResults = _runFFIBasedPredict(
           sessionAddress,
           inputImageList,
@@ -117,9 +129,15 @@ class FaceDetectionService extends MlModel {
     final inputs = {'input': inputOrt};
     final runOptions = OrtRunOptions();
     final session = OrtSession.fromAddress(sessionAddress);
+    _logger.info(
+      "Running face detection using ONNX runtime with input size ${inputImageList.length}",
+    );
     final List<OrtValue?> outputs = session.run(runOptions, inputs);
     final result =
         outputs[0]?.value as List<List<List<double>>>; // [1, 25200, 16]
+    _logger.info(
+      "Finished running face detection using ONNX runtime",
+    );
     inputOrt.release();
     runOptions.release();
     for (var element in outputs) {
@@ -133,11 +151,18 @@ class FaceDetectionService extends MlModel {
     Float32List inputImageList,
   ) async {
     final OnnxDart plugin = OnnxDart();
+    _logger.info(
+      "Running face detection using OnnxDart plugin with input size ${inputImageList.length}",
+    );
     final result = await plugin.predict(
       inputImageList,
       _modelName,
     );
+    _logger.info(
+      "Finished running face detection using OnnxDart plugin",
+    );
     final int resultLength = result!.length;
     assert(resultLength % 25200 * 16 == 0);
     const int outerLength = 1;
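The // [1, 25200, 16] comment and the resultLength assert describe the YOLO-face output layout: one batch, 25200 candidate boxes, 16 values per box. A sketch of the reshaping step the platform-plugin path needs (names and the per-box field breakdown are illustrative):

import 'dart:typed_data';

// Reshape the flat plugin output into the [1, 25200, 16] tensor layout
// the YOLO-face post-processing expects (sketch).
List<List<List<double>>> reshapeYoloOutput(Float32List flat) {
  const int boxes = 25200; // candidate boxes per image
  const int fields = 16; // box coords + score + landmark coordinates
  assert(flat.length == boxes * fields);
  return [
    List.generate(
      boxes,
      (i) => List.generate(fields, (j) => flat[i * fields + j]),
    ),
  ];
}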

View File

@@ -1,11 +1,11 @@
 import "dart:async" show unawaited;
 import "dart:typed_data" show Uint8List, Float32List;
-import "dart:ui" show Image;
 
 import "package:logging/logging.dart";
 import "package:photos/core/event_bus.dart";
 import "package:photos/events/diff_sync_complete_event.dart";
 import "package:photos/events/people_changed_event.dart";
+import "package:photos/models/ml/face/dimension.dart";
 import "package:photos/services/machine_learning/face_ml/face_detection/detection.dart";
 import "package:photos/services/machine_learning/face_ml/face_detection/face_detection_service.dart";
 import "package:photos/services/machine_learning/face_ml/face_embedding/face_embedding_service.dart";
@@ -73,7 +73,7 @@ class FaceRecognitionService {
   static Future<List<FaceResult>> runFacesPipeline(
     int enteFileID,
-    Image image,
+    Dimensions dim,
     Uint8List rawRgbaBytes,
     int faceDetectionAddress,
     int faceEmbeddingAddress,
@@ -81,15 +81,21 @@ class FaceRecognitionService {
     final faceResults = <FaceResult>[];
     final startTime = DateTime.now();
+    _logger.info(
+      "Starting runFacesPipeline with fileID $enteFileID",
+    );
 
     // Get the faces
     final List<FaceDetectionRelative> faceDetectionResult =
         await _detectFacesSync(
       enteFileID,
-      image,
+      dim,
       rawRgbaBytes,
       faceDetectionAddress,
       faceResults,
     );
+    _logger.info(
+      "Detected ${faceDetectionResult.length} faces in image with fileID $enteFileID",
+    );
     final detectFacesTime = DateTime.now();
     final detectFacesMs = detectFacesTime.difference(startTime).inMilliseconds;
@@ -101,9 +107,12 @@ class FaceRecognitionService {
       return [];
     }
 
+    _logger.info(
+      "Detected ${faceDetectionResult.length} faces, proceeding to alignment and embedding",
+    );
     // Align the faces
     final Float32List faceAlignmentResult = await _alignFacesSync(
-      image,
+      dim,
       rawRgbaBytes,
       faceDetectionResult,
       faceResults,
@@ -112,6 +121,9 @@ class FaceRecognitionService {
     final alignFacesMs =
         alignFacesTime.difference(detectFacesTime).inMilliseconds;
 
+    _logger.info(
+      "Aligned ${faceDetectionResult.length} faces in image with fileID $enteFileID",
+    );
     // Get the embeddings of the faces
     await _embedFacesSync(
       faceAlignmentResult,
@@ -133,20 +145,27 @@ class FaceRecognitionService {
   /// Runs face recognition on the given image data.
   static Future<List<FaceDetectionRelative>> _detectFacesSync(
     int fileID,
-    Image image,
+    Dimensions dimensions,
     Uint8List rawRgbaBytes,
     int interpreterAddress,
     List<FaceResult> faceResults,
   ) async {
     try {
+      _logger.info(
+        "Running face detection for fileID $fileID with interpreter at $interpreterAddress",
+      );
       // Get the bounding boxes of the faces
       final List<FaceDetectionRelative> faces =
           await FaceDetectionService.predict(
-        image,
+        dimensions,
         rawRgbaBytes,
         interpreterAddress,
       );
+      _logger.info(
+        "Detected ${faces.length} faces in image with fileID $fileID",
+      );
 
       // Add detected faces to the faceResults
       for (var i = 0; i < faces.length; i++) {
         faceResults.add(
@@ -169,7 +188,7 @@ class FaceRecognitionService {
   /// Aligns multiple faces from the given image data.
   /// Returns a list of the aligned faces as image data.
   static Future<Float32List> _alignFacesSync(
-    Image image,
+    Dimensions dim,
     Uint8List rawRgbaBytes,
     List<FaceDetectionRelative> faces,
     List<FaceResult> faceResults,
@@ -177,7 +196,7 @@ class FaceRecognitionService {
     try {
       final (alignedFaces, alignmentResults, _, blurValues, _) =
           await preprocessToMobileFaceNetFloat32List(
-        image,
+        dim,
         rawRgbaBytes,
         faces,
       );
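Throughout this pipeline, Dimensions replaces a full dart:ui Image because every consumer only ever needed width and height, e.g. for mapping relative face boxes to pixels. A sketch of that mapping (record fields are illustrative; the repo uses its own detection/box models):

// Convert a relative (0..1) face box to absolute pixel values using only
// image dimensions — the reason a decoded ui.Image is no longer required.
({double x, double y, double width, double height}) toAbsoluteBox({
  required double relX,
  required double relY,
  required double relWidth,
  required double relHeight,
  required int imageWidth,
  required int imageHeight,
}) {
  return (
    x: relX * imageWidth,
    y: relY * imageHeight,
    width: relWidth * imageWidth,
    height: relHeight * imageHeight,
  );
}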

View File

@@ -10,6 +10,7 @@ import "package:photos/db/files_db.dart";
 import "package:photos/db/ml/db.dart";
 import "package:photos/events/compute_control_event.dart";
 import "package:photos/events/people_changed_event.dart";
+import "package:photos/main.dart";
 import "package:photos/models/ml/face/face.dart";
 import "package:photos/models/ml/ml_versions.dart";
 import "package:photos/service_locator.dart";
@@ -69,31 +70,36 @@ class MLService {
     _logger.info("client: $client");
 
     // Listen on ComputeController
-    Bus.instance.on<ComputeControlEvent>().listen((event) {
-      if (!flagService.hasGrantedMLConsent) {
-        return;
-      }
-      _mlControllerStatus = event.shouldRun;
-      if (_mlControllerStatus) {
-        if (_shouldPauseIndexingAndClustering) {
-          _cancelPauseIndexingAndClustering();
-          _logger.info(
-            "MLController allowed running ML, faces indexing undoing previous pause",
-          );
-        } else {
-          _logger.info(
-            "MLController allowed running ML, faces indexing starting",
-          );
-        }
-        unawaited(runAllML());
-      } else {
-        _logger.info(
-          "MLController stopped running ML, faces indexing will be paused (unless it's fetching embeddings)",
-        );
-        pauseIndexingAndClustering();
-      }
-    });
+    /// Only listen for events when in foreground,
+    /// so we don't waste resources when the app is in background
+    /// and we just do things sequentially
+    if (!isProcessBg) {
+      Bus.instance.on<ComputeControlEvent>().listen((event) {
+        if (!flagService.hasGrantedMLConsent) {
+          return;
+        }
+        _mlControllerStatus = event.shouldRun;
+        if (_mlControllerStatus) {
+          if (_shouldPauseIndexingAndClustering) {
+            _cancelPauseIndexingAndClustering();
+            _logger.info(
+              "MLController allowed running ML, faces indexing undoing previous pause",
+            );
+          } else {
+            _logger.info(
+              "MLController allowed running ML, faces indexing starting",
+            );
+          }
+          unawaited(runAllML());
+        } else {
+          _logger.info(
+            "MLController stopped running ML, faces indexing will be paused (unless it's fetching embeddings)",
+          );
+          pauseIndexingAndClustering();
+        }
+      });
+    }
 
     _isInitialized = true;
     _logger.info('init done');
@@ -136,7 +142,7 @@ class MLService {
       );
       await clusterAllImages();
     }
-    if (_mlControllerStatus == true) {
+    if (!isProcessBg && _mlControllerStatus == true) {
       // refresh discover section
       magicCacheService.updateCache(forced: force).ignore();
       // refresh memories section
@@ -148,7 +154,7 @@ class MLService {
     if ((await mlDataDB.getUnclusteredFaceCount()) > 0) {
       await clusterAllImages();
     }
-    if (_mlControllerStatus == true) {
+    if (!isProcessBg && _mlControllerStatus == true) {
       // refresh discover section
       magicCacheService.updateCache().ignore();
       // refresh memories section (only runs if forced is true)
@@ -160,8 +166,10 @@ class MLService {
     } finally {
       _logger.severe("ML finished running");
       _isRunningML = false;
-      computeController.releaseCompute(ml: true);
-      VideoPreviewService.instance.queueFiles();
+      if (!isProcessBg) {
+        computeController.releaseCompute(ml: true);
+        VideoPreviewService.instance.queueFiles();
+      }
     }
   }
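The init change wires the ComputeControlEvent listener only in the foreground process; the background isolate runs ML sequentially and has no controller firing events. A generic sketch of the gate, with illustrative names standing in for Bus and isProcessBg:

import 'dart:async';

// Foreground-only event wiring (sketch): in the background isolate there
// is no controller loop, so subscribing would only leak a listener.
StreamSubscription<bool>? wireComputeEvents({
  required bool isBackgroundProcess,
  required Stream<bool> shouldRunEvents,
  required void Function() startMl,
  required void Function() pauseMl,
}) {
  if (isBackgroundProcess) return null; // background runs ML sequentially
  return shouldRunEvents.listen((shouldRun) {
    shouldRun ? startMl() : pauseMl();
  });
}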

View File

@@ -1,9 +1,9 @@
 import "dart:typed_data" show Uint8List, Float32List;
-import "dart:ui" show Image;
 
 import "package:logging/logging.dart";
 import "package:onnx_dart/onnx_dart.dart";
 import "package:onnxruntime/onnxruntime.dart";
+import "package:photos/models/ml/face/dimension.dart";
 import "package:photos/services/machine_learning/ml_model.dart";
 import "package:photos/utils/image_ml_util.dart";
 import "package:photos/utils/ml_util.dart";
@@ -28,13 +28,13 @@ class ClipImageEncoder extends MlModel {
   factory ClipImageEncoder() => instance;
 
   static Future<List<double>> predict(
-    Image image,
+    Dimensions dim,
     Uint8List rawRgbaBytes,
     int sessionAddress, [
     int? enteFileID,
   ]) async {
     final startTime = DateTime.now();
-    final inputList = await preprocessImageClip(image, rawRgbaBytes);
+    final inputList = await preprocessImageClip(dim, rawRgbaBytes);
     final preprocessingTime = DateTime.now();
     final preprocessingMs =
         preprocessingTime.difference(startTime).inMilliseconds;
@@ -86,6 +86,9 @@ class ClipImageEncoder extends MlModel {
     Float32List inputList,
   ) async {
     final OnnxDart plugin = OnnxDart();
+    _logger.info(
+      "Running Clip image predict using OnnxDart plugin with input size ${inputList.length}",
+    );
     final result = await plugin.predict(
       inputList,
       _modelName,

View File

@@ -1,5 +1,4 @@
 import "dart:async" show Timer, unawaited;
-import "dart:ui" show Image;
 
 import "package:flutter/foundation.dart";
 import "package:logging/logging.dart";
@@ -10,6 +9,7 @@ import "package:photos/db/ml/db.dart";
 import 'package:photos/events/embedding_updated_event.dart';
 import "package:photos/models/file/file.dart";
 import "package:photos/models/ml/clip.dart";
+import "package:photos/models/ml/face/dimension.dart";
 import "package:photos/models/ml/ml_versions.dart";
 import "package:photos/service_locator.dart";
 import "package:photos/services/collections_service.dart";
@@ -303,17 +303,24 @@ class SemanticSearchService {
   static Future<ClipResult> runClipImage(
     int enteFileID,
-    Image image,
+    Dimensions dimensions,
     Uint8List rawRgbaBytes,
     int clipImageAddress,
   ) async {
+    _logger.info(
+      "Running Clip image encoding for file ID: $enteFileID",
+    );
     final embedding = await ClipImageEncoder.predict(
-      image,
+      dimensions,
       rawRgbaBytes,
       clipImageAddress,
       enteFileID,
     );
+    _logger.info(
+      "Clip image encoding completed for file ID: $enteFileID",
+    );
     final clipResult = ClipResult(fileID: enteFileID, embedding: embedding);
 
     return clipResult;

View File

@@ -95,6 +95,10 @@ class RemoteSyncService {
   }
 
   Future<void> sync({bool silently = false}) async {
+    // TODO: remove
+    // if (!isProcessBg) {
+    //   return;
+    // }
     if (!_config.hasConfiguredAccount()) {
       _logger.info("Skipping remote sync since account is not configured");
       return;
@@ -377,10 +381,9 @@ class RemoteSyncService {
     localIDsToSync.removeAll(alreadyClaimedLocalIDs);
     if (alreadyClaimedLocalIDs.isNotEmpty && !_hasCleanupStaleEntry) {
       try {
         await _db.removeQueuedLocalFiles(alreadyClaimedLocalIDs);
-      } catch(e, s) {
-        _logger.severe("removeQueuedLocalFiles failed",e,s);
+      } catch (e, s) {
+        _logger.severe("removeQueuedLocalFiles failed", e, s);
       }
     }
   }

View File

@@ -23,7 +23,8 @@ void callbackDispatcher() {
     try {
       BgTaskUtils.$.info('Task started $tlog');
       await runBackgroundTask(taskName, tlog).timeout(
-        Platform.isIOS ? kBGTaskTimeout : const Duration(hours: 1),
+        // TODO: For testing don't do seppuku
+        Platform.isIOS && false ? kBGTaskTimeout : const Duration(hours: 1),
        onTimeout: () async {
          BgTaskUtils.$.warning(
            "TLE, committing seppuku for taskID: $taskName",

View File

@@ -7,6 +7,7 @@ import "dart:ui";
 import "package:exif_reader/exif_reader.dart";
 import 'package:flutter/painting.dart' as paint show decodeImageFromList;
 import "package:flutter_image_compress/flutter_image_compress.dart";
+import 'package:image/image.dart' as img_pkg;
 import "package:logging/logging.dart";
 import 'package:ml_linalg/linalg.dart';
 import "package:photos/models/ml/face/box.dart";
@@ -38,16 +39,66 @@ const int _faceThumbnailCompressionQuality = 90;
 const int _faceThumbnailMinDimension = 512;
 
 class DecodedImage {
-  final Image image;
+  final Dimensions dimensions;
+  final Image? image;
   final Uint8List? rawRgbaBytes;
 
-  const DecodedImage(this.image, [this.rawRgbaBytes]);
+  const DecodedImage({
+    required this.dimensions,
+    this.image,
+    this.rawRgbaBytes,
+  });
 }
 
 Future<DecodedImage> decodeImageFromPath(
   String imagePath, {
   required bool includeRgbaBytes,
+  required bool includeDartUiImage,
+  bool inBackground = false,
 }) async {
+  if (inBackground) {
+    if (includeDartUiImage) {
+      _logger.severe(
+        "Decoding image in background with Dart UI Image is not possible!",
+      );
+      throw Exception(
+        "Decoding image in background with Dart UI Image is not possible!",
+      );
+    }
+    if (!includeRgbaBytes) {
+      _logger.severe(
+        "Decoding image in background but not returning anything",
+      );
+      throw Exception(
+        "Decoding image in background but not returning anything",
+      );
+    }
+    final image = await img_pkg.decodeImageFile(imagePath);
+    final imageData = image?.data;
+    if (imageData == null) {
+      _logger.severe(
+        "Failed to decode image from file: $imagePath using image package",
+      );
+      throw Exception(
+        "Failed to decode image from file: $imagePath using image package",
+      );
+    }
+    Uint8List? bytes;
+    for (final order in img_pkg.ChannelOrder.values) {
+      bytes = imageData.getBytes(order: order);
+      _logger.info("Bytes length is: ${bytes.length}, for order: : $order");
+    }
+    final dimensions = Dimensions(
+      width: image!.width,
+      height: image.height,
+    );
+    _logger.info("Dimensions are: $dimensions");
+    return DecodedImage(
+      dimensions: dimensions,
+      rawRgbaBytes: bytes,
+    );
+  }
   final imageData = await File(imagePath).readAsBytes();
   final Map<String, IfdTag> exifData = await readExifFromBytes(imageData);
@@ -66,6 +117,9 @@ Future<DecodedImage> decodeImageFromPath(
   }
 
   late Image image;
+  _logger.info(
+    'Decoding image at path: $imagePath, format: $format, includeRgbaBytes: $includeRgbaBytes',
+  );
   try {
     image = await decodeImageFromData(imageData);
   } catch (e, s) {
@@ -99,11 +153,24 @@ Future<DecodedImage> decodeImageFromPath(
       );
     }
   }
+  _logger.info(
+    "Decoded image at path: $imagePath [i]",
+  );
   if (!includeRgbaBytes) {
-    return DecodedImage(image);
+    return DecodedImage(
+      dimensions: Dimensions(width: image.width, height: image.height),
+      image: includeDartUiImage ? image : null,
+    );
   }
+  _logger.info(
+    "Getting Raw RGBA",
+  );
   final rawRgbaBytes = await _getRawRgbaBytes(image);
-  return DecodedImage(image, rawRgbaBytes);
+  return DecodedImage(
+    dimensions: Dimensions(width: image.width, height: image.height),
+    image: includeDartUiImage ? image : null,
+    rawRgbaBytes: rawRgbaBytes,
+  );
 }
 
 /// Decodes [Uint8List] image data to an ui.[Image] object.
@@ -170,15 +237,21 @@ Future<List<Uint8List>> generateFaceThumbnailsUsingCanvas(
   final decodedImage = await decodeImageFromPath(
     imagePath,
     includeRgbaBytes: false,
+    includeDartUiImage: true,
   );
-  final Image img = decodedImage.image;
+  final dimensions = decodedImage.dimensions;
+  final Image? img = decodedImage.image;
+  if (img == null) {
+    _logger.severe('Image is null, cannot generate face thumbnails');
+    return [];
+  }
   final futureFaceThumbnails = <Future<Uint8List>>[];
   for (final faceBox in faceBoxes) {
     // Note that the faceBox values are relative to the image size, so we need to convert them to absolute values first
-    final double xMinAbs = faceBox.x * img.width;
-    final double yMinAbs = faceBox.y * img.height;
-    final double widthAbs = faceBox.width * img.width;
-    final double heightAbs = faceBox.height * img.height;
+    final double xMinAbs = faceBox.x * dimensions.width;
+    final double yMinAbs = faceBox.y * dimensions.height;
+    final double widthAbs = faceBox.width * dimensions.width;
+    final double heightAbs = faceBox.height * dimensions.height;
 
     // Calculate the crop values by adding some padding around the face and making sure it's centered
     const regularPadding = 0.4;
@@ -193,10 +266,10 @@ Future<List<Uint8List>> generateFaceThumbnailsUsingCanvas(
         2 * min(yOvershoot, regularPadding - minimumPadding) * heightAbs;
 
     // Prevent the face from going out of image bounds
-    final xCropSafe = xCrop.clamp(0, img.width);
-    final yCropSafe = yCrop.clamp(0, img.height);
-    final widthCropSafe = widthCrop.clamp(0, img.width - xCropSafe);
-    final heightCropSafe = heightCrop.clamp(0, img.height - yCropSafe);
+    final xCropSafe = xCrop.clamp(0, dimensions.width);
+    final yCropSafe = yCrop.clamp(0, dimensions.height);
+    final widthCropSafe = widthCrop.clamp(0, dimensions.width - xCropSafe);
+    final heightCropSafe = heightCrop.clamp(0, dimensions.height - yCropSafe);
 
     futureFaceThumbnails.add(
       _cropAndEncodeCanvas(
@@ -223,14 +296,14 @@ Future<List<Uint8List>> generateFaceThumbnailsUsingCanvas(
 }
 
 Future<(Float32List, Dimensions)> preprocessImageYoloFace(
-  Image image,
+  Dimensions dim,
   Uint8List rawRgbaBytes,
 ) async {
   const requiredWidth = 640;
   const requiredHeight = 640;
-  final scale = min(requiredWidth / image.width, requiredHeight / image.height);
-  final scaledWidth = (image.width * scale).round().clamp(0, requiredWidth);
-  final scaledHeight = (image.height * scale).round().clamp(0, requiredHeight);
+  final scale = min(requiredWidth / dim.width, requiredHeight / dim.height);
+  final scaledWidth = (dim.width * scale).round().clamp(0, requiredWidth);
+  final scaledHeight = (dim.height * scale).round().clamp(0, requiredHeight);
 
   final processedBytes = Float32List(3 * requiredHeight * requiredWidth);
@@ -247,7 +320,7 @@ Future<(Float32List, Dimensions)> preprocessImageYoloFace(
         pixel = _getPixelBilinear(
           w / scale,
           h / scale,
-          image,
+          dim,
           rawRgbaBytes,
         );
       }
@@ -262,16 +335,16 @@ Future<(Float32List, Dimensions)> preprocessImageYoloFace(
 }
 
 Future<Float32List> preprocessImageClip(
-  Image image,
+  Dimensions dim,
   Uint8List rawRgbaBytes,
 ) async {
   const int requiredWidth = 256;
   const int requiredHeight = 256;
   const int requiredSize = 3 * requiredWidth * requiredHeight;
-  final scale = max(requiredWidth / image.width, requiredHeight / image.height);
+  final scale = max(requiredWidth / dim.width, requiredHeight / dim.height);
   final bool useAntiAlias = scale < 0.8;
-  final scaledWidth = (image.width * scale).round();
-  final scaledHeight = (image.height * scale).round();
+  final scaledWidth = (dim.width * scale).round();
+  final scaledHeight = (dim.height * scale).round();
   final widthOffset = max(0, scaledWidth - requiredWidth) / 2;
   final heightOffset = max(0, scaledHeight - requiredHeight) / 2;
@@ -285,7 +358,7 @@ Future<Float32List> preprocessImageClip(
     final RGB pixel = _getPixelBilinear(
       w / scale,
       h / scale,
-      image,
+      dim,
       rawRgbaBytes,
       antiAlias: useAntiAlias,
     );
@@ -301,20 +374,19 @@ Future<Float32List> preprocessImageClip(
 Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
     preprocessToMobileFaceNetFloat32List(
-  Image image,
+  Dimensions dim,
   Uint8List rawRgbaBytes,
   List<FaceDetectionRelative> relativeFaces, {
   int width = 112,
   int height = 112,
 }) async {
-  final Size originalSize =
-      Size(image.width.toDouble(), image.height.toDouble());
+  final Size originalSize = Size(dim.width.toDouble(), dim.height.toDouble());
 
   final List<FaceDetectionAbsolute> absoluteFaces =
       relativeToAbsoluteDetections(
     relativeDetections: relativeFaces,
-    imageWidth: image.width,
-    imageHeight: image.height,
+    imageWidth: dim.width,
+    imageHeight: dim.height,
   );
 
   final alignedImagesFloat32List =
@@ -338,7 +410,7 @@ Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
     alignmentResults.add(alignmentResult);
 
     _warpAffineFloat32List(
-      image,
+      dim,
       rawRgbaBytes,
       alignmentResult.affineMatrix,
       alignedImagesFloat32List,
@@ -372,14 +444,14 @@ Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
 RGB _readPixelColor(
   int x,
   int y,
-  Image image,
+  Dimensions dim,
   Uint8List rgbaBytes,
 ) {
-  if (y < 0 || y >= image.height || x < 0 || x >= image.width) {
+  if (y < 0 || y >= dim.height || x < 0 || x >= dim.width) {
     if (y < -maxKernelRadius ||
-        y >= image.height + maxKernelRadius ||
+        y >= dim.height + maxKernelRadius ||
         x < -maxKernelRadius ||
-        x >= image.width + maxKernelRadius) {
+        x >= dim.width + maxKernelRadius) {
       _logger.severe(
         '`readPixelColor`: Invalid pixel coordinates, out of bounds. x: $x, y: $y',
       );
@@ -387,9 +459,9 @@ RGB _readPixelColor(
     return const (114, 114, 114);
   }
-  assert(rgbaBytes.lengthInBytes == 4 * image.width * image.height);
+  assert(rgbaBytes.lengthInBytes == 4 * dim.width * dim.height);
 
-  final int byteOffset = 4 * (image.width * y + x);
+  final int byteOffset = 4 * (dim.width * y + x);
   return (
     rgbaBytes[byteOffset], // red
     rgbaBytes[byteOffset + 1], // green
@@ -400,7 +472,7 @@ RGB _readPixelColor(
 RGB _getPixelBlurred(
   int x,
   int y,
-  Image image,
+  Dimensions dim,
   Uint8List rgbaBytes,
 ) {
   double r = 0, g = 0, b = 0;
@@ -409,7 +481,7 @@ RGB _getPixelBlurred(
       final int px = (x - gaussianKernelRadius + kx);
       final int py = (y - gaussianKernelRadius + ky);
-      final RGB pixelRgbTuple = _readPixelColor(px, py, image, rgbaBytes);
+      final RGB pixelRgbTuple = _readPixelColor(px, py, dim, rgbaBytes);
       final double weight = gaussianKernel[ky][kx];
 
       r += pixelRgbTuple.$1 * weight;
@@ -477,7 +549,7 @@ Future<Image> _cropImage(
 }
 
 void _warpAffineFloat32List(
-  Image inputImage,
+  Dimensions dim,
   Uint8List rawRgbaBytes,
   List<List<double>> affineMatrix,
   Float32List outputList,
@@ -532,8 +604,7 @@ void _warpAffineFloat32List(
       final num xOrigin = (xTrans - b00) * a00Prime + (yTrans - b10) * a01Prime;
       final num yOrigin = (xTrans - b00) * a10Prime + (yTrans - b10) * a11Prime;
 
-      final RGB pixel =
-          _getPixelBicubic(xOrigin, yOrigin, inputImage, rawRgbaBytes);
+      final RGB pixel = _getPixelBicubic(xOrigin, yOrigin, dim, rawRgbaBytes);
 
       // Set the new pixel
       outputList[startIndex + 3 * (yTrans * width + xTrans)] =
@@ -591,13 +662,13 @@ Future<List<Uint8List>> compressFaceThumbnails(Map args) async {
 RGB _getPixelBilinear(
   num fx,
   num fy,
-  Image image,
+  Dimensions dim,
   Uint8List rawRgbaBytes, {
   bool antiAlias = false,
 }) {
   // Clamp to image boundaries
-  fx = fx.clamp(0, image.width - 1);
-  fy = fy.clamp(0, image.height - 1);
+  fx = fx.clamp(0, dim.width - 1);
+  fy = fy.clamp(0, dim.height - 1);
 
   // Get the surrounding coordinates and their weights
   final int x0 = fx.floor();
@@ -610,12 +681,12 @@ RGB _getPixelBilinear(
   final dy1 = 1.0 - dy;
 
   // Get the original pixels (with gaussian blur if antialias)
-  final RGB Function(int, int, Image, Uint8List) readPixel =
+  final RGB Function(int, int, Dimensions, Uint8List) readPixel =
       antiAlias ? _getPixelBlurred : _readPixelColor;
-  final RGB pixel1 = readPixel(x0, y0, image, rawRgbaBytes);
-  final RGB pixel2 = readPixel(x1, y0, image, rawRgbaBytes);
-  final RGB pixel3 = readPixel(x0, y1, image, rawRgbaBytes);
-  final RGB pixel4 = readPixel(x1, y1, image, rawRgbaBytes);
+  final RGB pixel1 = readPixel(x0, y0, dim, rawRgbaBytes);
+  final RGB pixel2 = readPixel(x1, y0, dim, rawRgbaBytes);
+  final RGB pixel3 = readPixel(x0, y1, dim, rawRgbaBytes);
+  final RGB pixel4 = readPixel(x1, y1, dim, rawRgbaBytes);
 
   int bilinear(
     num val1,
int bilinear( int bilinear(
num val1, num val1,
@@ -635,9 +706,9 @@ RGB _getPixelBilinear(
} }
/// Get the pixel value using Bicubic Interpolation. Code taken mainly from https://github.com/brendan-duncan/image/blob/6e407612752ffdb90b28cd5863c7f65856349348/lib/src/image/image.dart#L697 /// Get the pixel value using Bicubic Interpolation. Code taken mainly from https://github.com/brendan-duncan/image/blob/6e407612752ffdb90b28cd5863c7f65856349348/lib/src/image/image.dart#L697
RGB _getPixelBicubic(num fx, num fy, Image image, Uint8List rawRgbaBytes) { RGB _getPixelBicubic(num fx, num fy, Dimensions dim, Uint8List rawRgbaBytes) {
fx = fx.clamp(0, image.width - 1); fx = fx.clamp(0, dim.width - 1);
fy = fy.clamp(0, image.height - 1); fy = fy.clamp(0, dim.height - 1);
final x = fx.toInt() - (fx >= 0.0 ? 0 : 1); final x = fx.toInt() - (fx >= 0.0 ? 0 : 1);
final px = x - 1; final px = x - 1;
@@ -656,62 +727,60 @@ RGB _getPixelBicubic(num fx, num fy, Image image, Uint8List rawRgbaBytes) {
           dx * dx * (2 * ipp - 5 * icp + 4 * inp - iap) +
           dx * dx * dx * (-ipp + 3 * icp - 3 * inp + iap));
 
-  final icc = _readPixelColor(x, y, image, rawRgbaBytes);
+  final icc = _readPixelColor(x, y, dim, rawRgbaBytes);
 
   final ipp =
-      px < 0 || py < 0 ? icc : _readPixelColor(px, py, image, rawRgbaBytes);
-  final icp = px < 0 ? icc : _readPixelColor(x, py, image, rawRgbaBytes);
-  final inp = py < 0 || nx >= image.width
+      px < 0 || py < 0 ? icc : _readPixelColor(px, py, dim, rawRgbaBytes);
+  final icp = px < 0 ? icc : _readPixelColor(x, py, dim, rawRgbaBytes);
+  final inp = py < 0 || nx >= dim.width
       ? icc
-      : _readPixelColor(nx, py, image, rawRgbaBytes);
-  final iap = ax >= image.width || py < 0
+      : _readPixelColor(nx, py, dim, rawRgbaBytes);
+  final iap = ax >= dim.width || py < 0
       ? icc
-      : _readPixelColor(ax, py, image, rawRgbaBytes);
+      : _readPixelColor(ax, py, dim, rawRgbaBytes);
 
   final ip0 = cubic(dx, ipp.$1, icp.$1, inp.$1, iap.$1);
   final ip1 = cubic(dx, ipp.$2, icp.$2, inp.$2, iap.$2);
   final ip2 = cubic(dx, ipp.$3, icp.$3, inp.$3, iap.$3);
   // final ip3 = cubic(dx, ipp.a, icp.a, inp.a, iap.a);
 
-  final ipc = px < 0 ? icc : _readPixelColor(px, y, image, rawRgbaBytes);
-  final inc =
-      nx >= image.width ? icc : _readPixelColor(nx, y, image, rawRgbaBytes);
-  final iac =
-      ax >= image.width ? icc : _readPixelColor(ax, y, image, rawRgbaBytes);
+  final ipc = px < 0 ? icc : _readPixelColor(px, y, dim, rawRgbaBytes);
+  final inc = nx >= dim.width ? icc : _readPixelColor(nx, y, dim, rawRgbaBytes);
+  final iac = ax >= dim.width ? icc : _readPixelColor(ax, y, dim, rawRgbaBytes);
   final ic0 = cubic(dx, ipc.$1, icc.$1, inc.$1, iac.$1);
   final ic1 = cubic(dx, ipc.$2, icc.$2, inc.$2, iac.$2);
   final ic2 = cubic(dx, ipc.$3, icc.$3, inc.$3, iac.$3);
   // final ic3 = cubic(dx, ipc.a, icc.a, inc.a, iac.a);
 
-  final ipn = px < 0 || ny >= image.height
+  final ipn = px < 0 || ny >= dim.height
       ? icc
-      : _readPixelColor(px, ny, image, rawRgbaBytes);
+      : _readPixelColor(px, ny, dim, rawRgbaBytes);
   final icn =
-      ny >= image.height ? icc : _readPixelColor(x, ny, image, rawRgbaBytes);
-  final inn = nx >= image.width || ny >= image.height
+      ny >= dim.height ? icc : _readPixelColor(x, ny, dim, rawRgbaBytes);
+  final inn = nx >= dim.width || ny >= dim.height
       ? icc
-      : _readPixelColor(nx, ny, image, rawRgbaBytes);
-  final ian = ax >= image.width || ny >= image.height
+      : _readPixelColor(nx, ny, dim, rawRgbaBytes);
+  final ian = ax >= dim.width || ny >= dim.height
      ? icc
-      : _readPixelColor(ax, ny, image, rawRgbaBytes);
+      : _readPixelColor(ax, ny, dim, rawRgbaBytes);
   final in0 = cubic(dx, ipn.$1, icn.$1, inn.$1, ian.$1);
   final in1 = cubic(dx, ipn.$2, icn.$2, inn.$2, ian.$2);
   final in2 = cubic(dx, ipn.$3, icn.$3, inn.$3, ian.$3);
   // final in3 = cubic(dx, ipn.a, icn.a, inn.a, ian.a);
 
-  final ipa = px < 0 || ay >= image.height
+  final ipa = px < 0 || ay >= dim.height
      ? icc
-      : _readPixelColor(px, ay, image, rawRgbaBytes);
+      : _readPixelColor(px, ay, dim, rawRgbaBytes);
   final ica =
-      ay >= image.height ? icc : _readPixelColor(x, ay, image, rawRgbaBytes);
-  final ina = nx >= image.width || ay >= image.height
+      ay >= dim.height ? icc : _readPixelColor(x, ay, dim, rawRgbaBytes);
+  final ina = nx >= dim.width || ay >= dim.height
      ? icc
-      : _readPixelColor(nx, ay, image, rawRgbaBytes);
-  final iaa = ax >= image.width || ay >= image.height
+      : _readPixelColor(nx, ay, dim, rawRgbaBytes);
+  final iaa = ax >= dim.width || ay >= dim.height
      ? icc
-      : _readPixelColor(ax, ay, image, rawRgbaBytes);
+      : _readPixelColor(ax, ay, dim, rawRgbaBytes);
   final ia0 = cubic(dx, ipa.$1, ica.$1, ina.$1, iaa.$1);
   final ia1 = cubic(dx, ipa.$2, ica.$2, ina.$2, iaa.$2);
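The headline change in this file: the background path decodes through package:image instead of dart:ui, so raw RGBA bytes can be produced without a Flutter engine. Note the committed loop iterates every ChannelOrder purely for log output and keeps whichever order comes last; a production decode would request RGBA once. A sketch against the image ^4.x API (assumed):

import 'dart:typed_data';

import 'package:image/image.dart' as img;

// Decode a file to raw RGBA bytes off the UI thread (sketch).
Future<(int width, int height, Uint8List rgba)> decodeRgbaInBackground(
  String imagePath,
) async {
  final decoded = await img.decodeImageFile(imagePath);
  if (decoded == null) {
    throw Exception('Failed to decode $imagePath using package:image');
  }
  // Normalize to 4 bytes/pixel RGBA so downstream pixel math can assume
  // byteOffset = 4 * (width * y + x).
  final rgba = decoded.getBytes(order: img.ChannelOrder.rgba);
  return (decoded.width, decoded.height, rgba);
}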

View File

@@ -128,6 +128,8 @@ abstract class SuperIsolate {
       final taskID = newIsolateTaskID(operation.name);
       _mainSendPort.send([taskID, operation.index, args, answerPort.sendPort]);
+      logger.info("Activity ${operation.name} started");
 
       answerPort.listen((receivedMessage) {
         if (receivedMessage['taskID'] != taskID) {
           logger.severe("Received isolate message with wrong taskID");
@@ -136,6 +138,7 @@ abstract class SuperIsolate {
         final logs = receivedMessage['logs'] as List<String>;
         IsolateLogger.handLogStringsToMainLogger(logs);
         final data = receivedMessage['data'];
+
         if (data is Map && data.containsKey('error')) {
           // Handle the error
           final errorMessage = data['error'];
@@ -143,11 +146,13 @@ abstract class SuperIsolate {
           final exception = Exception(errorMessage);
           final stackTrace = StackTrace.fromString(errorStackTrace);
           completer.completeError(exception, stackTrace);
+          logger.severe("Activity ${operation.name} failed");
         } else {
           completer.complete(data);
+          logger.info("Activity ${operation.name} completed");
         }
+        _activeTasks--;
       });
-      _activeTasks--;
 
       return completer.future;
     });
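The relocation of _activeTasks-- is a genuine fix: previously it ran synchronously right after registering the listener, before any reply from the isolate, so the counter undercounted in-flight work. A stripped-down sketch of the corrected request/response pattern (names illustrative):

import 'dart:async';
import 'dart:isolate';

int activeTasks = 0;

// Send a task to the worker isolate and resolve when it answers (sketch).
Future<dynamic> sendTask(SendPort mainSendPort, Object args) {
  final completer = Completer<dynamic>();
  final answerPort = ReceivePort();
  activeTasks++;
  mainSendPort.send([args, answerPort.sendPort]);
  answerPort.listen((message) {
    completer.complete(message);
    activeTasks--; // only after the isolate has actually answered
    answerPort.close();
  });
  return completer.future;
}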

View File

@@ -7,11 +7,11 @@ import "package:photos/db/files_db.dart";
 import "package:photos/db/ml/db.dart";
 import "package:photos/db/ml/filedata.dart";
 import "package:photos/extensions/list.dart";
+import "package:photos/main.dart";
 import "package:photos/models/file/extensions/file_props.dart";
 import "package:photos/models/file/file.dart";
 import "package:photos/models/file/file_type.dart";
 import "package:photos/models/ml/clip.dart";
-import "package:photos/models/ml/face/dimension.dart";
 import "package:photos/models/ml/face/face.dart";
 import "package:photos/models/ml/ml_versions.dart";
 import "package:photos/service_locator.dart";
@@ -412,24 +412,34 @@ Future<MLResult> analyzeImageStatic(Map args) async {
   );
   final startTime = DateTime.now();
 
+  _logger.info("Decoding image at path: $imagePath");
   // Decode the image once to use for both face detection and alignment
-  final decodedImage =
-      await decodeImageFromPath(imagePath, includeRgbaBytes: true);
-  final image = decodedImage.image;
+  final decodedImage = await decodeImageFromPath(
+    imagePath,
+    includeRgbaBytes: true,
+    includeDartUiImage: false,
+    inBackground: isProcessBg,
+  );
   final rawRgbaBytes = decodedImage.rawRgbaBytes!;
-  final decodedImageSize =
-      Dimensions(height: image.height, width: image.width);
+  final imageDimensions = decodedImage.dimensions;
+  _logger.info(
+    "Decoded image with rgbaLength: ${rawRgbaBytes.length}, dimensions: $imageDimensions",
+  );
   final result = MLResult.fromEnteFileID(enteFileID);
-  result.decodedImageSize = decodedImageSize;
+  result.decodedImageSize = imageDimensions;
 
   final decodeTime = DateTime.now();
   final decodeMs = decodeTime.difference(startTime).inMilliseconds;
+  _logger.info(
+    "Decoded image at path: $imagePath, in $decodeMs ms",
+  );
 
   String faceMsString = "", clipMsString = "";
   final pipelines = await Future.wait([
     runFaces
         ? FaceRecognitionService.runFacesPipeline(
             enteFileID,
-            image,
+            imageDimensions,
             rawRgbaBytes,
             faceDetectionAddress,
             faceEmbeddingAddress,
@@ -442,7 +452,7 @@ Future<MLResult> analyzeImageStatic(Map args) async {
     runClip
         ? SemanticSearchService.runClipImage(
             enteFileID,
-            image,
+            imageDimensions,
             rawRgbaBytes,
             clipImageAddress,
           ).then((result) {
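analyzeImageStatic decodes once and then fans out to both pipelines behind the runFaces/runClip flags via Future.wait. A sketch of that fan-out shape, with illustrative function parameters standing in for the two pipeline calls:

import 'dart:async';

// Both pipelines share one decode and run concurrently; a disabled
// pipeline resolves to null so Future.wait keeps a stable shape (sketch).
Future<(Object?, Object?)> runPipelines({
  required bool runFaces,
  required bool runClip,
  required Future<Object> Function() facesPipeline,
  required Future<Object> Function() clipPipeline,
}) async {
  final results = await Future.wait<Object?>([
    runFaces ? facesPipeline() : Future.value(null),
    runClip ? clipPipeline() : Future.value(null),
  ]);
  return (results[0], results[1]);
}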