Browse the code

[mob] Only run decoding of images once

laurenspriem, 1 year ago
parent
commit
0b0a0cec26

+ 10 - 8
mobile/lib/services/face_ml/face_detection/yolov5face/onnx_face_detection.dart

@@ -2,7 +2,8 @@ import "dart:async";
 import "dart:developer" as dev show log;
 import "dart:io" show File;
 import "dart:isolate";
-import 'dart:typed_data' show Float32List, Uint8List;
+import 'dart:typed_data' show ByteData, Float32List, Uint8List;
+import 'dart:ui' as ui show Image;
 
 import "package:computer/computer.dart";
 import 'package:flutter/material.dart';
@@ -314,18 +315,19 @@ class YoloOnnxFaceDetection {
 
   /// Detects faces in the given image data.
   static Future<(List<FaceDetectionRelative>, Size)> predictSync(
-    String imagePath,
+    ui.Image image,
+    ByteData imageByteData,
     int sessionAddress,
   ) async {
     assert(sessionAddress != 0 && sessionAddress != -1);
 
     final stopwatch = Stopwatch()..start();
 
-    final stopwatchDecoding = Stopwatch()..start();
-    final imageData = await File(imagePath).readAsBytes();
+    final stopwatchPreprocessing = Stopwatch()..start();
     final (inputImageList, originalSize, newSize) =
         await preprocessImageToFloat32ChannelsFirst(
-      imageData,
+      image,
+      imageByteData,
       normalization: 1,
       requiredWidth: kInputWidth,
       requiredHeight: kInputHeight,
@@ -344,12 +346,12 @@ class YoloOnnxFaceDetection {
       inputShape,
     );
     final inputs = {'input': inputOrt};
-    stopwatchDecoding.stop();
+    stopwatchPreprocessing.stop();
     dev.log(
-      'Face detection image decoding and preprocessing is finished, in ${stopwatchDecoding.elapsedMilliseconds}ms',
+      'Face detection image preprocessing is finished, in ${stopwatchPreprocessing.elapsedMilliseconds}ms',
     );
     _logger.info(
-      'Image decoding and preprocessing is finished, in ${stopwatchDecoding.elapsedMilliseconds}ms',
+      'Image decoding and preprocessing is finished, in ${stopwatchPreprocessing.elapsedMilliseconds}ms',
     );
     _logger.info('original size: $originalSize \n new size: $newSize');
 

+ 28 - 10
mobile/lib/services/face_ml/face_ml_service.dart

@@ -2,10 +2,11 @@ import "dart:async";
 import "dart:developer" as dev show log;
 import "dart:io" show File;
 import "dart:isolate";
-import "dart:typed_data" show Uint8List, Float32List;
+import "dart:typed_data" show Uint8List, Float32List, ByteData;
+import "dart:ui" show Image;
 
 import "package:computer/computer.dart";
-import "package:flutter/foundation.dart";
+import "package:flutter/foundation.dart" show debugPrint, kDebugMode;
 import "package:flutter_image_compress/flutter_image_compress.dart";
 import "package:flutter_isolate/flutter_isolate.dart";
 import "package:logging/logging.dart";
@@ -210,10 +211,19 @@ class FaceMlService {
             final stopwatchTotal = Stopwatch()..start();
             final stopwatch = Stopwatch()..start();
 
+            // Decode the image once to use for both face detection and alignment
+            final imageData = await File(imagePath).readAsBytes();
+            final image = await decodeImageFromData(imageData);
+            final ByteData imgByteData = await getByteDataFromImage(image);
+            dev.log('Reading and decoding image took '
+                '${stopwatch.elapsedMilliseconds} ms');
+            stopwatch.reset();
+
             // Get the faces
             final List<FaceDetectionRelative> faceDetectionResult =
                 await FaceMlService.detectFacesSync(
-              imagePath,
+              image,
+              imgByteData,
               faceDetectionAddress,
               resultBuilder: resultBuilder,
             );
@@ -235,12 +245,13 @@ class FaceMlService {
             // Align the faces
             final Float32List faceAlignmentResult =
                 await FaceMlService.alignFacesSync(
-              imagePath,
+              image,
+              imgByteData,
               faceDetectionResult,
               resultBuilder: resultBuilder,
             );
 
-            dev.log("Completed `alignFaces` function, in "
+            dev.log("Completed `alignFacesSync` function, in "
                 "${stopwatch.elapsedMilliseconds} ms");
 
             stopwatch.reset();
@@ -251,7 +262,7 @@ class FaceMlService {
               resultBuilder: resultBuilder,
             );
 
-            dev.log("Completed `embedBatchFaces` function, in "
+            dev.log("Completed `embedFacesSync` function, in "
                 "${stopwatch.elapsedMilliseconds} ms");
 
             stopwatch.stop();
@@ -899,7 +910,8 @@ class FaceMlService {
   ///
   /// Throws [CouldNotInitializeFaceDetector], [CouldNotRunFaceDetector] or [GeneralFaceMlException] if something goes wrong.
   static Future<List<FaceDetectionRelative>> detectFacesSync(
-    String imagePath,
+    Image image,
+    ByteData imageByteData,
     int interpreterAddress, {
     FaceMlResultBuilder? resultBuilder,
   }) async {
@@ -907,7 +919,8 @@ class FaceMlService {
       // Get the bounding boxes of the faces
       final (List<FaceDetectionRelative> faces, dataSize) =
           await YoloOnnxFaceDetection.predictSync(
-        imagePath,
+        image,
+        imageByteData,
         interpreterAddress,
       );
 
@@ -969,14 +982,19 @@ class FaceMlService {
   ///
   /// Throws [CouldNotWarpAffine] or [GeneralFaceMlException] if the face alignment fails.
   static Future<Float32List> alignFacesSync(
-    String imagePath,
+    Image image,
+    ByteData imageByteData,
     List<FaceDetectionRelative> faces, {
     FaceMlResultBuilder? resultBuilder,
   }) async {
     try {
       final stopwatch = Stopwatch()..start();
       final (alignedFaces, alignmentResults, _, blurValues, originalImageSize) =
-          await preprocessToMobileFaceNetFloat32List(imagePath, faces);
+          await preprocessToMobileFaceNetFloat32List(
+        image,
+        imageByteData,
+        faces,
+      );
       stopwatch.stop();
       dev.log(
         "Face alignment image decoding and processing took ${stopwatch.elapsedMilliseconds} ms",

+ 9 - 2
mobile/lib/utils/image_ml_isolate.dart

@@ -134,9 +134,12 @@ class ImageMlIsolate {
             final requiredWidth = args['requiredWidth'] as int;
             final requiredHeight = args['requiredHeight'] as int;
             final maintainAspectRatio = args['maintainAspectRatio'] as bool;
+            final Image image = await decodeImageFromData(imageData);
+            final imageByteData = await getByteDataFromImage(image);
             final (result, originalSize, newSize) =
                 await preprocessImageToFloat32ChannelsFirst(
-              imageData,
+              image,
+              imageByteData,
               normalization: normalization,
               requiredWidth: requiredWidth,
               requiredHeight: requiredHeight,
@@ -187,6 +190,9 @@ class ImageMlIsolate {
             final List<FaceDetectionRelative> relativeFaces = facesJson
                 .map((face) => FaceDetectionRelative.fromJson(face))
                 .toList();
+            final imageData = await File(imagePath).readAsBytes();
+            final Image image = await decodeImageFromData(imageData);
+            final imageByteData = await getByteDataFromImage(image);
             final (
               inputs,
               alignmentResults,
@@ -194,7 +200,8 @@ class ImageMlIsolate {
               blurValues,
               originalSize
             ) = await preprocessToMobileFaceNetFloat32List(
-              imagePath,
+              image,
+              imageByteData,
               relativeFaces,
             );
             final List<Map<String, dynamic>> alignmentResultsJson =

+ 7 - 9
mobile/lib/utils/image_ml_util.dart

@@ -717,7 +717,8 @@ Future<(Num3DInputMatrix, Size, Size)> preprocessImageToMatrix(
 }
 
 Future<(Float32List, Size, Size)> preprocessImageToFloat32ChannelsFirst(
-  Uint8List imageData, {
+  Image image,
+  ByteData imgByteData, {
   required int normalization,
   required int requiredWidth,
   required int requiredHeight,
@@ -729,8 +730,6 @@ Future<(Float32List, Size, Size)> preprocessImageToFloat32ChannelsFirst(
       : normalization == 1
           ? normalizePixelRange1
           : normalizePixelNoRange;
-  final Image image = await decodeImageFromData(imageData);
-  final ByteData imgByteData = await getByteDataFromImage(image);
   final originalSize = Size(image.width.toDouble(), image.height.toDouble());
 
   if (image.width == requiredWidth && image.height == requiredHeight) {
@@ -1078,17 +1077,14 @@ Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
 
 Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
     preprocessToMobileFaceNetFloat32List(
-  String imagePath,
+  Image image,
+  ByteData imageByteData,
   List<FaceDetectionRelative> relativeFaces, {
   int width = 112,
   int height = 112,
 }) async {
-  final Uint8List imageData = await File(imagePath).readAsBytes();
   final stopwatch = Stopwatch()..start();
-  final Image image = await decodeImageFromData(imageData);
-  final imageByteData = await getByteDataFromImage(image);
-  stopwatch.stop();
-  log("Face Alignment decoding ui image took: ${stopwatch.elapsedMilliseconds} ms");
+  
   final Size originalSize =
       Size(image.width.toDouble(), image.height.toDouble());
 
@@ -1147,6 +1143,8 @@ Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
     isBlurs.add(isBlur);
     blurValues.add(blurValue);
   }
+  stopwatch.stop();
+  log("Face Alignment took: ${stopwatch.elapsedMilliseconds} ms");
   return (
     alignedImagesFloat32List,
     alignmentResults,