import 'dart:async';
import "dart:io" show File;
import 'dart:isolate';
import 'dart:typed_data' show Float32List, Uint8List;
import 'dart:ui';

import "package:flutter/rendering.dart";
import 'package:flutter_isolate/flutter_isolate.dart';
import "package:logging/logging.dart";
import "package:photos/face/model/box.dart";
import "package:photos/face/model/dimension.dart";
import 'package:photos/models/ml/ml_typedefs.dart';
import 'package:photos/services/machine_learning/face_ml/face_alignment/alignment_result.dart';
import 'package:photos/services/machine_learning/face_ml/face_detection/detection.dart';
import "package:photos/utils/image_ml_util.dart";
import "package:synchronized/synchronized.dart";

enum ImageOperation {
  @Deprecated("No longer using BlazeFace")
  preprocessBlazeFace,
  preprocessYoloOnnx,
  preprocessFaceAlign,
  preprocessMobileFaceNet,
  preprocessMobileFaceNetOnnx,
  generateFaceThumbnails,
  cropAndPadFace,
}

/// This class uses functions from ["package:photos/utils/image_ml_util.dart"]
/// to preprocess images for ML models. It is responsible for all image
/// operations needed for ML, and runs in a separate isolate to avoid jank.
///
/// It can be accessed through the singleton `ImageMlIsolate.instance`, e.g.
/// `ImageMlIsolate.instance.generateFaceThumbnailsForImage(imagePath, faceBoxes)`.
///
/// IMPORTANT: Make sure to dispose of the isolate when you're done with it,
/// e.g. `ImageMlIsolate.instance.dispose();`
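///
/// A minimal usage sketch (exact call sites in the app may differ):
/// ```dart
/// await ImageMlIsolate.instance.init();
/// final thumbnails = await ImageMlIsolate.instance
///     .generateFaceThumbnailsForImage(imagePath, faceBoxes);
/// ImageMlIsolate.instance.dispose();
/// ```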
class ImageMlIsolate {
  // static const String debugName = 'ImageMlIsolate';

  final _logger = Logger('ImageMlIsolate');

  Timer? _inactivityTimer;
  final Duration _inactivityDuration = const Duration(seconds: 60);
  int _activeTasks = 0;

  final _initLock = Lock();
  final _functionLock = Lock();

  late FlutterIsolate _isolate;
  // Assigned in `init()` before any use, so no initializer is needed here.
  late ReceivePort _receivePort;
  late SendPort _mainSendPort;

  bool isSpawned = false;

  // singleton pattern
  ImageMlIsolate._privateConstructor();

  /// Use this instance to access the ImageMlIsolate service. Make sure to
  /// call `init()` before using it, e.g. `await ImageMlIsolate.instance.init();`,
  /// and kill the isolate when you're done with it via `dispose()`, e.g.
  /// `ImageMlIsolate.instance.dispose();`
  ///
  /// In between, you can use the public methods, e.g.
  /// `ImageMlIsolate.instance.generateFaceThumbnailsForImage(imagePath, faceBoxes)`.
  static final ImageMlIsolate instance = ImageMlIsolate._privateConstructor();
  factory ImageMlIsolate() => instance;
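
  /// Spawns the isolate (if it hasn't been spawned already) and completes
  /// once the worker's [SendPort] has been received. Concurrent calls are
  /// serialized by a lock, so it is safe to call this multiple times.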
  Future<void> init() async {
    return _initLock.synchronized(() async {
      if (isSpawned) return;

      _receivePort = ReceivePort();

      try {
        _isolate = await FlutterIsolate.spawn(
          _isolateMain,
          _receivePort.sendPort,
        );
        _mainSendPort = await _receivePort.first as SendPort;
        isSpawned = true;

        _resetInactivityTimer();
      } catch (e) {
        _logger.severe('Could not spawn isolate', e);
        isSpawned = false;
      }
    });
  }

  Future<void> ensureSpawned() async {
    if (!isSpawned) {
      await init();
    }
  }
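
  /// Entry point of the spawned isolate. Listens for messages of the form
  /// `[operationIndex, argsMap, answerSendPort]`, runs the requested
  /// [ImageOperation], and sends the result (or a map with `'error'` and
  /// `'stackTrace'` keys) back through `answerSendPort`.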
  @pragma('vm:entry-point')
  static void _isolateMain(SendPort mainSendPort) async {
    final receivePort = ReceivePort();
    mainSendPort.send(receivePort.sendPort);

    receivePort.listen((message) async {
      final functionIndex = message[0] as int;
      final function = ImageOperation.values[functionIndex];
      final args = message[1] as Map<String, dynamic>;
      final sendPort = message[2] as SendPort;

      try {
        switch (function) {
          case ImageOperation.preprocessBlazeFace:
            final imageData = args['imageData'] as Uint8List;
            final normalize = args['normalize'] as bool;
            final int normalization = normalize ? 2 : -1;
            final requiredWidth = args['requiredWidth'] as int;
            final requiredHeight = args['requiredHeight'] as int;
            final qualityIndex = args['quality'] as int;
            final maintainAspectRatio = args['maintainAspectRatio'] as bool;
            final quality = FilterQuality.values[qualityIndex];
            final (result, originalSize, newSize) =
                await preprocessImageToMatrix(
              imageData,
              normalization: normalization,
              requiredWidth: requiredWidth,
              requiredHeight: requiredHeight,
              quality: quality,
              maintainAspectRatio: maintainAspectRatio,
            );
            sendPort.send({
              'inputs': result,
              'originalWidth': originalSize.width,
              'originalHeight': originalSize.height,
              'newWidth': newSize.width,
              'newHeight': newSize.height,
            });
          case ImageOperation.preprocessYoloOnnx:
            final imageData = args['imageData'] as Uint8List;
            final normalize = args['normalize'] as bool;
            final int normalization = normalize ? 1 : -1;
            final requiredWidth = args['requiredWidth'] as int;
            final requiredHeight = args['requiredHeight'] as int;
            final maintainAspectRatio = args['maintainAspectRatio'] as bool;
            final Image image = await decodeImageFromData(imageData);
            final imageByteData = await getByteDataFromImage(image);
            final (result, originalSize, newSize) =
                await preprocessImageToFloat32ChannelsFirst(
              image,
              imageByteData,
              normalization: normalization,
              requiredWidth: requiredWidth,
              requiredHeight: requiredHeight,
              maintainAspectRatio: maintainAspectRatio,
            );
            sendPort.send({
              'inputs': result,
              'originalWidth': originalSize.width,
              'originalHeight': originalSize.height,
              'newWidth': newSize.width,
              'newHeight': newSize.height,
            });
          case ImageOperation.preprocessFaceAlign:
            final imageData = args['imageData'] as Uint8List;
            final faceLandmarks =
                args['faceLandmarks'] as List<List<List<double>>>;
            final List<Uint8List> result = await preprocessFaceAlignToUint8List(
              imageData,
              faceLandmarks,
            );
            sendPort.send(List.from(result));
          case ImageOperation.preprocessMobileFaceNet:
            final imageData = args['imageData'] as Uint8List;
            final facesJson = args['facesJson'] as List<Map<String, dynamic>>;
            final (
              inputs,
              alignmentResults,
              isBlurs,
              blurValues,
              originalSize
            ) = await preprocessToMobileFaceNetInput(
              imageData,
              facesJson,
            );
            final List<Map<String, dynamic>> alignmentResultsJson =
                alignmentResults.map((result) => result.toJson()).toList();
            sendPort.send({
              'inputs': inputs,
              'alignmentResultsJson': alignmentResultsJson,
              'isBlurs': isBlurs,
              'blurValues': blurValues,
              'originalWidth': originalSize.width,
              'originalHeight': originalSize.height,
            });
          case ImageOperation.preprocessMobileFaceNetOnnx:
            final imagePath = args['imagePath'] as String;
            final facesJson = args['facesJson'] as List<Map<String, dynamic>>;
            final List<FaceDetectionRelative> relativeFaces = facesJson
                .map((face) => FaceDetectionRelative.fromJson(face))
                .toList();
            final imageData = await File(imagePath).readAsBytes();
            final Image image = await decodeImageFromData(imageData);
            final imageByteData = await getByteDataFromImage(image);
            final (
              inputs,
              alignmentResults,
              isBlurs,
              blurValues,
              originalSize
            ) = await preprocessToMobileFaceNetFloat32List(
              image,
              imageByteData,
              relativeFaces,
            );
            final List<Map<String, dynamic>> alignmentResultsJson =
                alignmentResults.map((result) => result.toJson()).toList();
            sendPort.send({
              'inputs': inputs,
              'alignmentResultsJson': alignmentResultsJson,
              'isBlurs': isBlurs,
              'blurValues': blurValues,
              'originalWidth': originalSize.width,
              'originalHeight': originalSize.height,
            });
          case ImageOperation.generateFaceThumbnails:
            final imagePath = args['imagePath'] as String;
            final Uint8List imageData = await File(imagePath).readAsBytes();
            final faceBoxesJson =
                args['faceBoxesList'] as List<Map<String, dynamic>>;
            final List<FaceBox> faceBoxes =
                faceBoxesJson.map((json) => FaceBox.fromJson(json)).toList();
            final List<Uint8List> results = await generateFaceThumbnails(
              imageData,
              faceBoxes,
            );
            sendPort.send(List.from(results));
          case ImageOperation.cropAndPadFace:
            final imageData = args['imageData'] as Uint8List;
            final faceBox = args['faceBox'] as List<double>;
            final Uint8List result =
                await cropAndPadFaceData(imageData, faceBox);
            sendPort.send(<dynamic>[result]);
        }
      } catch (e, stackTrace) {
        sendPort
            .send({'error': e.toString(), 'stackTrace': stackTrace.toString()});
      }
    });
  }

  /// The common method to run any operation in the isolate. It sends the
  /// [message] to [_isolateMain] and waits for the result.
  Future<dynamic> _runInIsolate(
    (ImageOperation, Map<String, dynamic>) message,
  ) async {
    await ensureSpawned();
    return _functionLock.synchronized(() async {
      _resetInactivityTimer();
      final completer = Completer<dynamic>();
      final answerPort = ReceivePort();

      _activeTasks++;
      _mainSendPort.send([message.$1.index, message.$2, answerPort.sendPort]);

      answerPort.listen((receivedMessage) {
        // Only count the task as finished once the isolate has answered, so
        // the inactivity timer doesn't dispose the isolate mid-task.
        _activeTasks--;
        answerPort.close();
        if (receivedMessage is Map && receivedMessage.containsKey('error')) {
          // Handle the error
          final errorMessage = receivedMessage['error'];
          final errorStackTrace = receivedMessage['stackTrace'];
          final exception = Exception(errorMessage);
          final stackTrace = StackTrace.fromString(errorStackTrace);
          completer.completeError(exception, stackTrace);
        } else {
          completer.complete(receivedMessage);
        }
      });

      return completer.future;
    });
  }

  /// Resets a timer that kills the isolate after a certain amount of inactivity.
  ///
  /// Should be called after initialization (e.g. inside `init()`) and after
  /// every call to the isolate (e.g. inside `_runInIsolate()`).
  void _resetInactivityTimer() {
    _inactivityTimer?.cancel();
    _inactivityTimer = Timer(_inactivityDuration, () {
      if (_activeTasks > 0) {
        _logger.info('Tasks are still running. Delaying isolate disposal.');
        // Optionally, reschedule the timer to check again later.
        _resetInactivityTimer();
      } else {
        _logger.info(
          'ImageMlIsolate has been inactive for ${_inactivityDuration.inSeconds} seconds with no tasks running. Killing isolate.',
        );
        dispose();
      }
    });
  }

  /// Disposes the isolate worker.
  void dispose() {
    if (!isSpawned) return;

    isSpawned = false;
    _isolate.kill();
    _receivePort.close();
    _inactivityTimer?.cancel();
  }

  /// Preprocesses [imageData] for standard ML models inside a separate isolate.
  ///
  /// Returns a [Num3DInputMatrix] image usable for ML inference with BlazeFace.
  ///
  /// Uses [preprocessImageToMatrix] inside the isolate.
  @Deprecated("No longer using BlazeFace")
  Future<(Num3DInputMatrix, Size, Size)> preprocessImageBlazeFace(
    Uint8List imageData, {
    required bool normalize,
    required int requiredWidth,
    required int requiredHeight,
    FilterQuality quality = FilterQuality.medium,
    bool maintainAspectRatio = true,
  }) async {
    final Map<String, dynamic> results = await _runInIsolate(
      (
        ImageOperation.preprocessBlazeFace,
        {
          'imageData': imageData,
          'normalize': normalize,
          'requiredWidth': requiredWidth,
          'requiredHeight': requiredHeight,
          'quality': quality.index,
          'maintainAspectRatio': maintainAspectRatio,
        },
      ),
    );
    final inputs = results['inputs'] as Num3DInputMatrix;
    final originalSize = Size(
      results['originalWidth'] as double,
      results['originalHeight'] as double,
    );
    final newSize = Size(
      results['newWidth'] as double,
      results['newHeight'] as double,
    );
    return (inputs, originalSize, newSize);
  }

  /// Uses [preprocessImageToFloat32ChannelsFirst] inside the isolate.
  @Deprecated(
    "Old method, not needed since we now run the whole ML pipeline for faces in a single isolate",
  )
  Future<(Float32List, Dimensions, Dimensions)> preprocessImageYoloOnnx(
    Uint8List imageData, {
    required bool normalize,
    required int requiredWidth,
    required int requiredHeight,
    FilterQuality quality = FilterQuality.medium,
    bool maintainAspectRatio = true,
  }) async {
    final Map<String, dynamic> results = await _runInIsolate(
      (
        ImageOperation.preprocessYoloOnnx,
        {
          'imageData': imageData,
          'normalize': normalize,
          'requiredWidth': requiredWidth,
          'requiredHeight': requiredHeight,
          'quality': quality.index,
          'maintainAspectRatio': maintainAspectRatio,
        },
      ),
    );
    final inputs = results['inputs'] as Float32List;
    final originalSize = Dimensions(
      width: results['originalWidth'] as int,
      height: results['originalHeight'] as int,
    );
    final newSize = Dimensions(
      width: results['newWidth'] as int,
      height: results['newHeight'] as int,
    );
    return (inputs, originalSize, newSize);
  }

  /// Preprocesses [imageData] for face alignment inside a separate isolate,
  /// to display the aligned faces. Mostly used for debugging.
  ///
  /// Returns a list of [Uint8List] images, one for each face, in png format.
  ///
  /// Uses [preprocessFaceAlignToUint8List] inside the isolate.
  ///
  /// WARNING: For preprocessing for MobileFaceNet, use [preprocessMobileFaceNet] instead!
  @Deprecated(
    "Old method, not needed since we now run the whole ML pipeline for faces in a single isolate",
  )
  Future<List<Uint8List>> preprocessFaceAlign(
    Uint8List imageData,
    List<FaceDetectionAbsolute> faces,
  ) async {
    final faceLandmarks = faces.map((face) => face.allKeypoints).toList();
    return await _runInIsolate(
      (
        ImageOperation.preprocessFaceAlign,
        {
          'imageData': imageData,
          'faceLandmarks': faceLandmarks,
        },
      ),
    ).then((value) => value.cast<Uint8List>());
  }

  /// Preprocesses [imageData] for MobileFaceNet input inside a separate isolate.
  ///
  /// Returns a list of [Num3DInputMatrix] images, one for each face.
  ///
  /// Uses [preprocessToMobileFaceNetInput] inside the isolate.
  @Deprecated("Old method used in TensorFlow Lite")
  Future<
      (
        List<Num3DInputMatrix>,
        List<AlignmentResult>,
        List<bool>,
        List<double>,
        Size,
      )> preprocessMobileFaceNet(
    Uint8List imageData,
    List<FaceDetectionRelative> faces,
  ) async {
    final List<Map<String, dynamic>> facesJson =
        faces.map((face) => face.toJson()).toList();
    final Map<String, dynamic> results = await _runInIsolate(
      (
        ImageOperation.preprocessMobileFaceNet,
        {
          'imageData': imageData,
          'facesJson': facesJson,
        },
      ),
    );
    final inputs = results['inputs'] as List<Num3DInputMatrix>;
    final alignmentResultsJson =
        results['alignmentResultsJson'] as List<Map<String, dynamic>>;
    final alignmentResults = alignmentResultsJson.map((json) {
      return AlignmentResult.fromJson(json);
    }).toList();
    final isBlurs = results['isBlurs'] as List<bool>;
    final blurValues = results['blurValues'] as List<double>;
    final originalSize = Size(
      results['originalWidth'] as double,
      results['originalHeight'] as double,
    );
    return (inputs, alignmentResults, isBlurs, blurValues, originalSize);
  }

  /// Uses [preprocessToMobileFaceNetFloat32List] inside the isolate.
  @Deprecated(
    "Old method, not needed since we now run the whole ML pipeline for faces in a single isolate",
  )
  Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
      preprocessMobileFaceNetOnnx(
    String imagePath,
    List<FaceDetectionRelative> faces,
  ) async {
    final List<Map<String, dynamic>> facesJson =
        faces.map((face) => face.toJson()).toList();
    final Map<String, dynamic> results = await _runInIsolate(
      (
        ImageOperation.preprocessMobileFaceNetOnnx,
        {
          'imagePath': imagePath,
          'facesJson': facesJson,
        },
      ),
    );
    final inputs = results['inputs'] as Float32List;
    final alignmentResultsJson =
        results['alignmentResultsJson'] as List<Map<String, dynamic>>;
    final alignmentResults = alignmentResultsJson.map((json) {
      return AlignmentResult.fromJson(json);
    }).toList();
    final isBlurs = results['isBlurs'] as List<bool>;
    final blurValues = results['blurValues'] as List<double>;
    final originalSize = Size(
      results['originalWidth'] as double,
      results['originalHeight'] as double,
    );
    return (inputs, alignmentResults, isBlurs, blurValues, originalSize);
  }

  /// Generates face thumbnails for all [faceBoxes] in the image stored at
  /// [imagePath].
  ///
  /// Uses [generateFaceThumbnails] inside the isolate.
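  ///
  /// A minimal usage sketch (assumes `imagePath` points to a decodable image
  /// and `faceBoxes` came from face detection):
  /// ```dart
  /// final thumbnails = await ImageMlIsolate.instance
  ///     .generateFaceThumbnailsForImage(imagePath, faceBoxes);
  /// ```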
  Future<List<Uint8List>> generateFaceThumbnailsForImage(
    String imagePath,
    List<FaceBox> faceBoxes,
  ) async {
    final List<Map<String, dynamic>> faceBoxesJson =
        faceBoxes.map((box) => box.toJson()).toList();
    return await _runInIsolate(
      (
        ImageOperation.generateFaceThumbnails,
        {
          'imagePath': imagePath,
          'faceBoxesList': faceBoxesJson,
        },
      ),
    ).then((value) => value.cast<Uint8List>());
  }

  /// Generates cropped and padded image data from [imageData] and a [faceBox].
  ///
  /// The steps are:
  /// 1. Crop the image to the face bounding box
  /// 2. Resize this cropped image to a square that is half the BlazeFace input size
  /// 3. Pad the image to the BlazeFace input size
  ///
  /// Uses [cropAndPadFaceData] inside the isolate.
  @Deprecated('For second pass of BlazeFace, no longer used')
  Future<Uint8List> cropAndPadFace(
    Uint8List imageData,
    List<double> faceBox,
  ) async {
    return await _runInIsolate(
      (
        ImageOperation.cropAndPadFace,
        {
          'imageData': imageData,
          'faceBox': List<double>.from(faceBox),
        },
      ),
    ).then((value) => value[0] as Uint8List);
  }
}