Consolidate types

Manav Rathi 2024-05-14 10:19:42 +05:30
parent 5c9b3f551a
commit 068ed78fe0
33 changed files with 90 additions and 90 deletions


@@ -5,7 +5,7 @@ import { t } from "i18next";
import { AppContext } from "pages/_app";
import { useContext } from "react";
import { components } from "react-select";
import { IndexStatus } from "types/machineLearning/ui";
import { IndexStatus } from "utils/storage/mlIDbStorage";
import { Suggestion, SuggestionType } from "types/search";
const { Menu } = components;


@@ -17,7 +17,7 @@ import {
import { Collection } from "types/collection";
import { LocationTagData } from "types/entity";
import { EnteFile } from "types/file";
import { Person } from "types/machineLearning";
import { Person } from "services/ml/types";
import {
ClipSearchScores,
DateValue,


@@ -4,7 +4,7 @@ import { Legend } from "components/PhotoViewer/styledComponents/Legend";
import { t } from "i18next";
import React, { useEffect, useState } from "react";
import { EnteFile } from "types/file";
import { Face, Person } from "types/machineLearning";
import { Face, Person } from "services/ml/types";
import { getPeopleList, getUnidentifiedFaces } from "utils/machineLearning";
const FaceChipContainer = styled("div")`


@@ -1,5 +1,5 @@
import { JobConfig } from "types/common/job";
import { MLSearchConfig, MLSyncConfig } from "types/machineLearning";
import { MLSearchConfig, MLSyncConfig } from "services/ml/types";
export const DEFAULT_ML_SYNC_JOB_CONFIG: JobConfig = {
intervalSec: 5,


@@ -4,7 +4,7 @@ import {
FaceAlignmentService,
FaceDetection,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
import { getArcfaceAlignment } from "utils/machineLearning/faceAlign";
class ArcfaceAlignmentService implements FaceAlignmentService {


@@ -5,7 +5,7 @@ import {
FaceCropService,
FaceDetection,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
import { getArcfaceAlignment } from "utils/machineLearning/faceAlign";
import { getFaceCrop } from "utils/machineLearning/faceCrop";


@@ -8,7 +8,7 @@ import {
ClusteringResults,
HdbscanResults,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
class ClusteringService {
private dbscan: DBSCAN;


@@ -6,7 +6,7 @@ import {
ClusteringService,
HdbscanResults,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
class DbscanClusteringService implements ClusteringService {
public method: Versioned<ClusteringMethod>;


@@ -5,7 +5,7 @@ import {
Face,
MLSyncContext,
MLSyncFileContext,
} from "types/machineLearning";
} from "services/ml/types";
import { imageBitmapToBlob } from "utils/image";
import {
areFaceIdsSame,


@@ -6,7 +6,7 @@ import {
ClusteringService,
HdbscanResults,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
class HdbscanClusteringService implements ClusteringService {
public method: Versioned<ClusteringMethod>;


@@ -3,7 +3,7 @@ import {
BlurDetectionService,
Face,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
import { createGrayscaleIntMatrixFromNormalized2List } from "utils/image";
import { mobileFaceNetFaceSize } from "./mobileFaceNetEmbeddingService";


@@ -4,7 +4,6 @@ import { ComlinkWorker } from "@/next/worker/comlink-worker";
import { getDedicatedCryptoWorker } from "@ente/shared/crypto";
import { DedicatedCryptoWorker } from "@ente/shared/crypto/internal/crypto.worker";
import PQueue from "p-queue";
import { EnteFile } from "types/file";
import {
BlurDetectionMethod,
BlurDetectionService,
@@ -22,7 +21,8 @@ import {
MLLibraryData,
MLSyncConfig,
MLSyncContext,
} from "types/machineLearning";
} from "services/ml/types";
import { EnteFile } from "types/file";
import { logQueueStats } from "utils/machineLearning";
import arcfaceAlignmentService from "./arcfaceAlignmentService";
import arcfaceCropService from "./arcfaceCropService";


@@ -6,13 +6,13 @@ import { MAX_ML_SYNC_ERROR_COUNT } from "constants/mlConfig";
import downloadManager from "services/download";
import { putEmbedding } from "services/embeddingService";
import { getLocalFiles } from "services/fileService";
import { EnteFile } from "types/file";
import {
MLSyncContext,
MLSyncFileContext,
MLSyncResult,
MlFileData,
} from "types/machineLearning";
} from "services/ml/types";
import { EnteFile } from "types/file";
import { getMLSyncConfig } from "utils/machineLearning/config";
import { LocalFileMlDataToServerFileMl } from "utils/machineLearning/mldataMappers";
import mlIDbStorage from "utils/storage/mlIDbStorage";


@@ -5,9 +5,9 @@ import { eventBus, Events } from "@ente/shared/events";
import { getToken, getUserID } from "@ente/shared/storage/localStorage/helpers";
import debounce from "debounce";
import PQueue from "p-queue";
import { MLSyncResult } from "services/ml/types";
import { JobResult } from "types/common/job";
import { EnteFile } from "types/file";
import { MLSyncResult } from "types/machineLearning";
import { getDedicatedMLWorker } from "utils/comlink/ComlinkMLWorker";
import { SimpleJob } from "utils/common/job";
import { logQueueStats } from "utils/machineLearning";


@@ -4,7 +4,7 @@ import {
FaceEmbeddingMethod,
FaceEmbeddingService,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
export const mobileFaceNetFaceSize = 112;


@@ -1,5 +1,5 @@
import log from "@/next/log";
import { Face, MLSyncContext, Person } from "types/machineLearning";
import { Face, MLSyncContext, Person } from "services/ml/types";
import {
findFirstIfSorted,
getAllFacesFromMap,


@@ -1,6 +1,6 @@
import { FILE_TYPE } from "@/media/file-type";
import log from "@/next/log";
import { MLSyncContext, MLSyncFileContext } from "types/machineLearning";
import { MLSyncContext, MLSyncFileContext } from "services/ml/types";
import {
getLocalFileImageBitmap,
getOriginalImageBitmap,


@@ -1,5 +1,11 @@
import { workerBridge } from "@/next/worker/worker-bridge";
import { euclidean } from "hdbscan";
import {
FaceDetection,
FaceDetectionMethod,
FaceDetectionService,
Versioned,
} from "services/ml/types";
import {
Matrix,
applyToPoint,
@@ -8,12 +14,6 @@ import {
translate,
} from "transformation-matrix";
import { Dimensions } from "types/image";
import {
FaceDetection,
FaceDetectionMethod,
FaceDetectionService,
Versioned,
} from "types/machineLearning";
import {
clamp,
getPixelBilinear,


@@ -329,3 +329,46 @@ export interface MachineLearningWorker {
close(): void;
}
export interface ClipEmbedding {
embedding: Float32Array;
model: "ggml-clip" | "onnx-clip";
}
/// [x] and [y] are the coordinates of the top left corner of the box, i.e. the minimum x and y values.
/// [width] and [height] are the width and height of the box.
/// All values are in absolute pixels relative to the original image size.
export interface CenterBox {
x: number;
y: number;
height: number;
width: number;
}
export interface DetectionPoint {
x: number;
y: number;
}
export interface Detection {
box: CenterBox;
landmarks: DetectionPoint[];
}
export interface FileMLFace {
id: string;
confidence: number;
blur: number;
embedding: Float32Array;
detection: Detection;
}
export interface FileML {
fileID: number;
clip?: ClipEmbedding;
faces: Face[];
height: number;
width: number;
version: number;
error?: string;
}
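
To make the newly consolidated shapes concrete, here is a minimal usage sketch (not part of this commit); the values and the id/landmark semantics are invented purely for illustration:

import { Detection, FileMLFace } from "services/ml/types";

// A hypothetical face occupying the top-left quadrant of a 1000×800 px image.
// Per the comment above, box.x and box.y are the top-left corner (the minimum
// values), in absolute pixels relative to the original image size.
const detection: Detection = {
    box: { x: 0, y: 0, width: 500, height: 400 },
    landmarks: [
        { x: 180, y: 150 }, // e.g. left eye
        { x: 320, y: 150 }, // e.g. right eye
    ],
};

const face: FileMLFace = {
    id: "face-1", // illustrative id, not the scheme the app actually uses
    confidence: 0.98,
    blur: 15.2,
    embedding: new Float32Array(192), // placeholder; real embeddings come from the face model
    detection,
};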


@@ -2,10 +2,10 @@ import { FILE_TYPE } from "@/media/file-type";
import log from "@/next/log";
import * as chrono from "chrono-node";
import { t } from "i18next";
import { Person } from "services/ml/types";
import { Collection } from "types/collection";
import { EntityType, LocationTag, LocationTagData } from "types/entity";
import { EnteFile } from "types/file";
import { Person } from "types/machineLearning";
import {
ClipSearchScores,
DateValue,


@@ -1,4 +0,0 @@
export interface ClipEmbedding {
embedding: Float32Array;
model: "ggml-clip" | "onnx-clip";
}


@@ -1,27 +0,0 @@
/// [x] and [y] are the coordinates of the top left corner of the box, i.e. the minimum x and y values.
/// [width] and [height] are the width and height of the box.
/// All values are in absolute pixels relative to the original image size.
export interface CenterBox {
x: number;
y: number;
height: number;
width: number;
}
export interface Point {
x: number;
y: number;
}
export interface Detection {
box: CenterBox;
landmarks: Point[];
}
export interface Face {
id: string;
confidence: number;
blur: number;
embedding: Float32Array;
detection: Detection;
}


@@ -1,12 +0,0 @@
import { ClipEmbedding } from "./clip";
import { Face } from "./face";
export interface FileML {
fileID: number;
clip?: ClipEmbedding;
faces: Face[];
height: number;
width: number;
version: number;
error?: string;
}


@@ -1,7 +0,0 @@
export interface IndexStatus {
outOfSyncFilesExists: boolean;
nSyncedFiles: number;
nTotalFiles: number;
localFilesSynced: boolean;
peopleIndexSynced: boolean;
}


@@ -1,9 +1,9 @@
import { FILE_TYPE } from "@/media/file-type";
import { City } from "services/locationSearchService";
import { Person } from "services/ml/types";
import { LocationTagData } from "types/entity";
import { EnteFile } from "types/file";
import { Person } from "types/machineLearning";
import { IndexStatus } from "types/machineLearning/ui";
import { IndexStatus } from "utils/storage/mlIDbStorage";
export enum SuggestionType {
DATE = "DATE",


@@ -1,8 +1,8 @@
// these utils only work in env where OffscreenCanvas is available
import { Matrix, inverse } from "ml-matrix";
import { FaceAlignment } from "services/ml/types";
import { BlobOptions, Dimensions } from "types/image";
import { FaceAlignment } from "types/machineLearning";
import { enlargeBox } from "utils/machineLearning";
import { Box } from "../../../thirdparty/face-api/classes";


@@ -3,8 +3,8 @@ import {
DEFAULT_ML_SYNC_CONFIG,
DEFAULT_ML_SYNC_JOB_CONFIG,
} from "constants/mlConfig";
import { MLSearchConfig, MLSyncConfig } from "services/ml/types";
import { JobConfig } from "types/common/job";
import { MLSearchConfig, MLSyncConfig } from "types/machineLearning";
import mlIDbStorage, {
ML_SEARCH_CONFIG_NAME,
ML_SYNC_CONFIG_NAME,


@@ -1,6 +1,6 @@
import { Matrix } from "ml-matrix";
import { FaceAlignment, FaceDetection } from "services/ml/types";
import { getSimilarityTransformation } from "similarity-transformation";
import { FaceAlignment, FaceDetection } from "types/machineLearning";
import { Point } from "../../../thirdparty/face-api/classes";
const ARCFACE_LANDMARKS = [


@@ -1,4 +1,4 @@
import { FaceAlignment, FaceCrop, FaceCropConfig } from "types/machineLearning";
import { FaceAlignment, FaceCrop, FaceCropConfig } from "services/ml/types";
import { cropWithRotation } from "utils/image";
import { enlargeBox } from ".";
import { Box } from "../../../thirdparty/face-api/classes";


@@ -4,8 +4,6 @@ import log from "@/next/log";
import PQueue from "p-queue";
import DownloadManager from "services/download";
import { getLocalFiles } from "services/fileService";
import { EnteFile } from "types/file";
import { Dimensions } from "types/image";
import {
DetectedFace,
Face,
@@ -13,7 +11,9 @@ import {
MlFileData,
Person,
Versioned,
} from "types/machineLearning";
} from "services/ml/types";
import { EnteFile } from "types/file";
import { Dimensions } from "types/image";
import { getRenderableImage } from "utils/file";
import { clamp, warpAffineFloat32List } from "utils/image";
import mlIDbStorage from "utils/storage/mlIDbStorage";


@@ -1,10 +1,10 @@
import {
ClipEmbedding,
Face,
FaceDetection,
Landmark,
MlFileData,
} from "types/machineLearning";
import { ClipEmbedding } from "types/machineLearning/data/clip";
} from "services/ml/types";
export interface FileML extends ServerFileMl {
updatedAt: number;


@@ -15,8 +15,15 @@ import {
openDB,
} from "idb";
import isElectron from "is-electron";
import { Face, MLLibraryData, MlFileData, Person } from "types/machineLearning";
import { IndexStatus } from "types/machineLearning/ui";
import { Face, MLLibraryData, MlFileData, Person } from "services/ml/types";
export interface IndexStatus {
outOfSyncFilesExists: boolean;
nSyncedFiles: number;
nTotalFiles: number;
localFilesSynced: boolean;
peopleIndexSynced: boolean;
}
interface Config {}
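
With IndexStatus now co-located with the ML IndexedDB storage layer, callers import it from utils/storage/mlIDbStorage (as the first hunk above shows). A minimal sketch of a hypothetical consumer, assuming the fields keep their existing meaning:

import { IndexStatus } from "utils/storage/mlIDbStorage";

// Hypothetical helpers, not part of this commit: derive a progress fraction
// and a completion flag from the index status.
const indexingProgress = (status: IndexStatus): number =>
    status.nTotalFiles === 0 ? 0 : status.nSyncedFiles / status.nTotalFiles;

const isIndexingDone = (status: IndexStatus): boolean =>
    status.localFilesSynced &&
    !status.outOfSyncFilesExists &&
    status.peopleIndexSynced;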


@@ -1,8 +1,8 @@
import log from "@/next/log";
import { expose } from "comlink";
import mlService from "services/machineLearning/machineLearningService";
import { MachineLearningWorker } from "services/ml/types";
import { EnteFile } from "types/file";
import { MachineLearningWorker } from "types/machineLearning";
export class DedicatedMLWorker implements MachineLearningWorker {
constructor() {