commit 33d3428222 by Manav Rathi, 1 year ago

+ 18 - 4
web/apps/photos/src/services/face/f-index.ts

@@ -1,3 +1,4 @@
+import { FILE_TYPE } from "@/media/file-type";
 import { openCache } from "@/next/blob-cache";
 import log from "@/next/log";
 import { workerBridge } from "@/next/worker/worker-bridge";
@@ -22,7 +23,6 @@ import {
     createGrayscaleIntMatrixFromNormalized2List,
     cropWithRotation,
     fetchImageBitmap,
-    getFaceId,
     getLocalFileImageBitmap,
     getPixelBilinear,
     getThumbnailImageBitmap,
@@ -31,7 +31,6 @@ import {
     warpAffineFloat32List,
 } from "./image";
 import { transformFaceDetections } from "./transform-box";
-import { FILE_TYPE } from "@/media/file-type";
 
 /**
  * Index faces in the given file.
@@ -131,7 +130,7 @@ const syncFileFaceDetections = async (fileContext: MLSyncFileContext) => {
     fileContext.newDetection = true;
     const imageBitmap = await fetchImageBitmapForContext(fileContext);
     const faceDetections = await detectFaces(imageBitmap);
-    // TODO: reenable faces filtering based on width
+    // TODO-ML(MR): Re-enable face filtering based on width
     const detectedFaces = faceDetections?.map((detection) => {
         return {
             fileId: fileContext.enteFile.id,
@@ -140,7 +139,7 @@ const syncFileFaceDetections = async (fileContext: MLSyncFileContext) => {
     });
     newMlFile.faces = detectedFaces?.map((detectedFace) => ({
         ...detectedFace,
-        id: getFaceId(detectedFace, newMlFile.imageDimensions),
+        id: makeFaceID(detectedFace, newMlFile.imageDimensions),
     }));
     // ?.filter((f) =>
     //     f.box.width > syncContext.config.faceDetection.minFaceSize
@@ -526,6 +525,21 @@ function normalizeLandmarks(
     ) as Array<[number, number]>;
 }
 
+const makeFaceID = (detectedFace: DetectedFace, imageDims: Dimensions) => {
+    const part = (v: number) => clamp(v, 0.0, 0.999999).toFixed(5).substring(2);
+    const xMin = part(detectedFace.detection.box.x / imageDims.width);
+    const yMin = part(detectedFace.detection.box.y / imageDims.height);
+    const xMax = part(
+        (detectedFace.detection.box.x + detectedFace.detection.box.width) /
+            imageDims.width,
+    );
+    const yMax = part(
+        (detectedFace.detection.box.y + detectedFace.detection.box.height) /
+            imageDims.height,
+    );
+    return [detectedFace.fileId, xMin, yMin, xMax, yMax].join("_");
+};
+
 /**
  * Laplacian blur detection.
  */
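
Note: the new makeFaceID derives a face ID from the file ID plus the detection box normalized to the image dimensions, with each coordinate clamped to [0, 1) and truncated to five decimal digits before the parts are joined with underscores. A minimal sketch of the resulting format, using made-up inputs; the partial DetectedFace literal and the cast are illustrative only and not part of this commit:

    // Hypothetical 200×300 box at (100, 50) in a 1000×800 image belonging to file 12345.
    const id = makeFaceID(
        {
            fileId: 12345,
            detection: { box: { x: 100, y: 50, width: 200, height: 300 } },
        } as DetectedFace,
        { width: 1000, height: 800 },
    );
    // Each coordinate is normalized, clamped to [0, 1), and truncated:
    // 100/1000 → "10000", 50/800 → "06250", 300/1000 → "30000", 350/800 → "43750",
    // so id === "12345_10000_06250_30000_43750".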

+ 1 - 19
web/apps/photos/src/services/face/image.ts

@@ -4,7 +4,7 @@ import log from "@/next/log";
 import { Matrix, inverse } from "ml-matrix";
 import DownloadManager from "services/download";
 import { Box, Dimensions, enlargeBox } from "services/face/geom";
-import { DetectedFace, FaceAlignment } from "services/face/types";
+import { FaceAlignment } from "services/face/types";
 import { getLocalFiles } from "services/fileService";
 import { EnteFile } from "types/file";
 import { getRenderableImage } from "utils/file";
@@ -20,24 +20,6 @@ export async function getLocalFile(fileId: number) {
     return localFiles.find((f) => f.id === fileId);
 }
 
-export function getFaceId(detectedFace: DetectedFace, imageDims: Dimensions) {
-    const part = (v: number) => clamp(v, 0.0, 0.999999).toFixed(5).substring(2);
-
-    const xMin = part(detectedFace.detection.box.x / imageDims.width);
-    const yMin = part(detectedFace.detection.box.y / imageDims.height);
-    const xMax = part(
-        (detectedFace.detection.box.x + detectedFace.detection.box.width) /
-            imageDims.width,
-    );
-    const yMax = part(
-        (detectedFace.detection.box.y + detectedFace.detection.box.height) /
-            imageDims.height,
-    );
-
-    const rawFaceID = `${xMin}_${yMin}_${xMax}_${yMax}`;
-    return `${detectedFace.fileId}_${rawFaceID}`;
-}
-
 export const fetchImageBitmap = async (file: EnteFile) =>
     fetchRenderableBlob(file).then(createImageBitmap);