diff --git a/web/apps/photos/src/components/MachineLearning/MLFileDebugView.tsx b/web/apps/photos/src/components/MachineLearning/MLFileDebugView.tsx
deleted file mode 100644
index a6c96476b..000000000
--- a/web/apps/photos/src/components/MachineLearning/MLFileDebugView.tsx
+++ /dev/null
@@ -1,228 +0,0 @@
-import { addLogLine } from "@ente/shared/logging";
-import "@tensorflow/tfjs-backend-cpu";
-import "@tensorflow/tfjs-backend-webgl";
-import { DEFAULT_ML_SYNC_CONFIG } from "constants/mlConfig";
-import { useEffect, useRef, useState } from "react";
-import arcfaceAlignmentService from "services/machineLearning/arcfaceAlignmentService";
-import arcfaceCropService from "services/machineLearning/arcfaceCropService";
-import blazeFaceDetectionService from "services/machineLearning/blazeFaceDetectionService";
-import imageSceneService from "services/machineLearning/imageSceneService";
-import ssdMobileNetV2Service from "services/machineLearning/ssdMobileNetV2Service";
-import { AlignedFace, FaceCrop, ObjectDetection } from "types/machineLearning";
-import { getMLSyncConfig } from "utils/machineLearning/config";
-import {
-    getAlignedFaceBox,
-    ibExtractFaceImage,
-    ibExtractFaceImageUsingTransform,
-} from "utils/machineLearning/faceAlign";
-import { ibExtractFaceImageFromCrop } from "utils/machineLearning/faceCrop";
-import { FaceCropsRow, FaceImagesRow, ImageBitmapView } from "./ImageViews";
-
-interface MLFileDebugViewProps {
-    file: File;
-}
-
-function drawFaceDetection(face: AlignedFace, ctx: CanvasRenderingContext2D) {
-    const pointSize = Math.ceil(
-        Math.max(ctx.canvas.width / 512, face.detection.box.width / 32),
-    );
-
-    ctx.save();
-    ctx.strokeStyle = "rgba(255, 0, 0, 0.8)";
-    ctx.lineWidth = pointSize;
-    ctx.strokeRect(
-        face.detection.box.x,
-        face.detection.box.y,
-        face.detection.box.width,
-        face.detection.box.height,
-    );
-    ctx.restore();
-
-    ctx.save();
-    ctx.strokeStyle = "rgba(0, 255, 0, 0.8)";
-    ctx.lineWidth = Math.round(pointSize * 1.5);
-    const alignedBox = getAlignedFaceBox(face.alignment);
-    ctx.strokeRect(
-        alignedBox.x,
-        alignedBox.y,
-        alignedBox.width,
-        alignedBox.height,
-    );
-    ctx.restore();
-
-    ctx.save();
-    ctx.fillStyle = "rgba(0, 0, 255, 0.8)";
-    face.detection.landmarks.forEach((l) => {
-        ctx.beginPath();
-        ctx.arc(l.x, l.y, pointSize, 0, Math.PI * 2, true);
-        ctx.fill();
-    });
-    ctx.restore();
-}
-
-function drawBbox(object: ObjectDetection, ctx: CanvasRenderingContext2D) {
-    ctx.font = "100px Arial";
-    ctx.save();
-    ctx.restore();
-    ctx.rect(...object.bbox);
-    ctx.lineWidth = 10;
-    ctx.strokeStyle = "green";
-    ctx.fillStyle = "green";
-    ctx.stroke();
-    ctx.fillText(
-        object.score.toFixed(3) + " " + object.class,
-        object.bbox[0],
-        object.bbox[1] > 10 ? object.bbox[1] - 5 : 10,
-    );
-}
-
-export default function MLFileDebugView(props: MLFileDebugViewProps) {
-    // const [imageBitmap, setImageBitmap] = useState<ImageBitmap>();
-    const [faceCrops, setFaceCrops] = useState<FaceCrop[]>();
-    const [facesUsingCrops, setFacesUsingCrops] = useState<ImageBitmap[]>();
-    const [facesUsingImage, setFacesUsingImage] = useState<ImageBitmap[]>();
-    const [facesUsingTransform, setFacesUsingTransform] =
-        useState<ImageBitmap[]>();
-
-    const canvasRef = useRef(null);
-
-    useEffect(() => {
-        let didCancel = false;
-        const loadFile = async () => {
-            // TODO: go through worker for these apis, to not include ml code in main bundle
-            const imageBitmap = await createImageBitmap(props.file);
-            const faceDetections =
-                await blazeFaceDetectionService.detectFaces(imageBitmap);
-            addLogLine("detectedFaces: ", faceDetections.length);
-
-            const objectDetections = await ssdMobileNetV2Service.detectObjects(
-                imageBitmap,
-                DEFAULT_ML_SYNC_CONFIG.objectDetection.maxNumBoxes,
-                DEFAULT_ML_SYNC_CONFIG.objectDetection.minScore,
-            );
-            addLogLine("detectedObjects: ", JSON.stringify(objectDetections));
-
-            const sceneDetections = await imageSceneService.detectScenes(
-                imageBitmap,
-                DEFAULT_ML_SYNC_CONFIG.sceneDetection.minScore,
-            );
-            addLogLine("detectedScenes: ", JSON.stringify(sceneDetections));
-
-            const mlSyncConfig = await getMLSyncConfig();
-            const faceCropPromises = faceDetections.map(async (faceDetection) =>
-                arcfaceCropService.getFaceCrop(
-                    imageBitmap,
-                    faceDetection,
-                    mlSyncConfig.faceCrop,
-                ),
-            );
-
-            const faceCrops = await Promise.all(faceCropPromises);
-            if (didCancel) return;
-            setFaceCrops(faceCrops);
-
-            const faceAlignments = faceDetections.map((detection) =>
-                arcfaceAlignmentService.getFaceAlignment(detection),
-            );
-            addLogLine("alignedFaces: ", JSON.stringify(faceAlignments));
-
-            const canvas: HTMLCanvasElement = canvasRef.current;
-            canvas.width = imageBitmap.width;
-            canvas.height = imageBitmap.height;
-            const ctx = canvas.getContext("2d");
-            if (didCancel) return;
-            ctx.drawImage(imageBitmap, 0, 0);
-            const alignedFaces = faceAlignments.map((alignment, i) => {
-                return {
-                    detection: faceDetections[i],
-                    alignment,
-                } as AlignedFace;
-            });
-            alignedFaces.forEach((alignedFace) =>
-                drawFaceDetection(alignedFace, ctx),
-            );
-
-            objectDetections.forEach((object) => drawBbox(object, ctx));
-
-            const facesUsingCrops = await Promise.all(
-                alignedFaces.map((face, i) => {
-                    return ibExtractFaceImageFromCrop(
-                        faceCrops[i],
-                        face.alignment,
-                        112,
-                    );
-                }),
-            );
-            const facesUsingImage = await Promise.all(
-                alignedFaces.map((face) => {
-                    return ibExtractFaceImage(imageBitmap, face.alignment, 112);
-                }),
-            );
-            const facesUsingTransform = await Promise.all(
-                alignedFaces.map((face) => {
-                    return ibExtractFaceImageUsingTransform(
-                        imageBitmap,
-                        face.alignment,
-                        112,
-                    );
-                }),
-            );
-
-            if (didCancel) return;
-            setFacesUsingCrops(facesUsingCrops);
-            setFacesUsingImage(facesUsingImage);
-            setFacesUsingTransform(facesUsingTransform);
-        };
-
-        props.file && loadFile();
-        return () => {
-            didCancel = true;
-        };
-    }, [props.file]);
-
-    return (
-        <div>
-            <canvas ref={canvasRef} />
-            {/* <ImageBitmapView image={imageBitmap} /> */}
-
-            <div>Face Crops:</div>
-            <FaceCropsRow>
-                {faceCrops?.map((faceCrop, i) => (
-                    <ImageBitmapView key={i} image={faceCrop.image} />
-                ))}
-            </FaceCropsRow>
-
-            <div>Face Images using face crops:</div>
-            <FaceImagesRow>
-                {facesUsingCrops?.map((image, i) => (
-                    <ImageBitmapView key={i} image={image} />
-                ))}
-            </FaceImagesRow>
-
-            <div>Face Images using original image:</div>
-            <FaceImagesRow>
-                {facesUsingImage?.map((image, i) => (
-                    <ImageBitmapView key={i} image={image} />
-                ))}
-            </FaceImagesRow>
-
-            <div>Face Images using transfrom:</div>
-            <FaceImagesRow>
-                {facesUsingTransform?.map((image, i) => (
-                    <ImageBitmapView key={i} image={image} />
-                ))}
-            </FaceImagesRow>
-        </div>
-    );
-}
diff --git a/web/apps/photos/src/components/MachineLearning/MLServiceFileInfoButton.tsx b/web/apps/photos/src/components/MachineLearning/MLServiceFileInfoButton.tsx
deleted file mode 100644
index 8146e239d..000000000
--- a/web/apps/photos/src/components/MachineLearning/MLServiceFileInfoButton.tsx
+++ /dev/null
@@ -1,60 +0,0 @@
-import { getToken, getUserID } from "@ente/shared/storage/localStorage/helpers";
-import { useState } from "react";
-import { Button, Spinner } from "react-bootstrap";
-import { EnteFile } from "types/file";
-import mlService from "../../services/machineLearning/machineLearningService";
-
-function MLServiceFileInfoButton({
-    file,
-    updateMLDataIndex,
-    setUpdateMLDataIndex,
-}: {
-    file: EnteFile;
-    updateMLDataIndex: number;
-    setUpdateMLDataIndex: (num: number) => void;
-}) {
-    const [mlServiceRunning, setMlServiceRunning] = useState(false);
-
-    const runMLService = async () => {
-        setMlServiceRunning(true);
-        const token = getToken();
-        const userID = getUserID();
-
-        // index 4 is for timeout of 240 seconds
-        await mlService.syncLocalFile(token, userID, file as EnteFile, null, 4);
-
-        setUpdateMLDataIndex(updateMLDataIndex + 1);
-        setMlServiceRunning(false);
-    };
-
-    return (
-        <Button onClick={runMLService} disabled={mlServiceRunning}>
-            {mlServiceRunning ? (
-                <Spinner animation="border" size="sm" />
-            ) : (
-                "Update ML data"
-            )}
-        </Button>
-    );
-}
-
-export default MLServiceFileInfoButton;
diff --git a/web/apps/photos/src/components/PhotoViewer/FileInfo/index.tsx b/web/apps/photos/src/components/PhotoViewer/FileInfo/index.tsx
index df2da85b7..0b4054e27 100644
--- a/web/apps/photos/src/components/PhotoViewer/FileInfo/index.tsx
+++ b/web/apps/photos/src/components/PhotoViewer/FileInfo/index.tsx
@@ -10,11 +10,24 @@ import TextSnippetOutlined from "@mui/icons-material/TextSnippetOutlined";
 import { Box, DialogProps, Link, Stack, styled } from "@mui/material";
 import { Chip } from "components/Chip";
 import { EnteDrawer } from "components/EnteDrawer";
+import { ObjectLabelList } from "components/MachineLearning/ObjectList";
+import {
+    PhotoPeopleList,
+    UnidentifiedFaces,
+} from "components/MachineLearning/PeopleList";
 import Titlebar from "components/Titlebar";
 import LinkButton from "components/pages/gallery/LinkButton";
+import { t } from "i18next";
+import { AppContext } from "pages/_app";
+import { GalleryContext } from "pages/gallery";
 import { useContext, useEffect, useMemo, useState } from "react";
 import { getEXIFLocation } from "services/upload/exifService";
 import { EnteFile } from "types/file";
+import { PublicCollectionGalleryContext } from "utils/publicCollectionGallery";
+import {
+    getMapDisableConfirmationDialog,
+    getMapEnableConfirmationDialog,
+} from "utils/ui";
 import { ExifData } from "./ExifData";
 import InfoItem from "./InfoItem";
 import MapBox from "./MapBox";
@@ -22,23 +35,6 @@ import { RenderCaption } from "./RenderCaption";
 import { RenderCreationTime } from "./RenderCreationTime";
 import { RenderFileName } from "./RenderFileName";
-import {
-    PhotoPeopleList,
-    UnidentifiedFaces,
-} from "components/MachineLearning/PeopleList";
-
-import { ObjectLabelList } from "components/MachineLearning/ObjectList";
-
-// import MLServiceFileInfoButton from 'components/MachineLearning/MLServiceFileInfoButton';
-import { t } from "i18next";
-import { AppContext } from "pages/_app";
-import { GalleryContext } from "pages/gallery";
-import { PublicCollectionGalleryContext } from "utils/publicCollectionGallery";
-import {
-    getMapDisableConfirmationDialog,
-    getMapEnableConfirmationDialog,
-} from "utils/ui";
-
 export const FileInfoSidebar = styled((props: DialogProps) => (
     <EnteDrawer {...props} anchor="right" />
 ))({
@@ -352,14 +348,6 @@ export function FileInfo({
                         file={file}
                         updateMLDataIndex={updateMLDataIndex}
                     />
-
-                    {/*
-                    <MLServiceFileInfoButton
-                        file={file}
-                        updateMLDataIndex={updateMLDataIndex}
-                        setUpdateMLDataIndex={setUpdateMLDataIndex}
-                    />
-                    */}
                 </>
             )}