diff --git a/desktop/docs/dependencies.md b/desktop/docs/dependencies.md index 9cced1f81..62f70e8e4 100644 --- a/desktop/docs/dependencies.md +++ b/desktop/docs/dependencies.md @@ -1,8 +1,8 @@ # Dependencies -* [Electron](#electron) -* [Dev dependencies](#dev) -* [Functionality](#functionality) +- [Electron](#electron) +- [Dev dependencies](#dev) +- [Functionality](#functionality) ## Electron @@ -114,8 +114,8 @@ available on the host machine, and is not bundled with our app. AI/ML runtime. It powers both natural language searches (using CLIP) and face detection (using YOLO). -[jpeg-js](https://github.com/jpeg-js/jpeg-js#readme) is used for decoding -JPEG data into raw RGB bytes before passing it to ONNX. +[jpeg-js](https://github.com/jpeg-js/jpeg-js#readme) is used for decoding JPEG +data into raw RGB bytes before passing it to ONNX. html-entities is used by the bundled clip-bpe-ts tokenizer for CLIP. diff --git a/desktop/src/main/services/ml-face.ts b/desktop/src/main/services/ml-face.ts index 63b7a9d02..62865ff23 100644 --- a/desktop/src/main/services/ml-face.ts +++ b/desktop/src/main/services/ml-face.ts @@ -139,5 +139,6 @@ export const faceEmbedding = async (input: Float32Array) => { const feeds = { img_inputs: inputTensor }; const results = await session.run(feeds); log.debug(() => `onnx/yolo face embedding took ${Date.now() - t} ms`); - return results.embeddings["cpuData"]; // as Float32Array; + // TODO(review): "cpuData" is an internal onnxruntime Tensor field; the public `Tensor.data` property presumably returns the same typed array — confirm and replace this `as unknown as any` escape hatch with `results.embeddings.data as Float32Array`. + return (results.embeddings as unknown as any)["cpuData"]; // as Float32Array; };