Let the web side log errors for the bridged methods
parent c5bb479c4f
commit c458b429a0
4 changed files with 151 additions and 215 deletions
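The main-process wrappers below used to log and rethrow; after this change they just throw, and the exception propagates across the IPC bridge for the web side to report. A minimal sketch of the renderer-side pattern this enables, not code from this commit: the bridge object `electron`, its method `clearElectronStore`, and the `logError` helper are assumed names for illustration.

    // Renderer-side wrapper around a bridged method (hypothetical names).
    declare const electron: { clearElectronStore: () => Promise<void> };
    declare function logError(e: unknown, message: string): void;

    export const clearStoredData = async () => {
        try {
            // The bridged method runs in the main process; if it throws,
            // the error is serialized over IPC and rethrown here.
            await electron.clearElectronStore();
        } catch (e) {
            // Logging now happens once, on the web side.
            logError(e, "clearElectronStore failed");
            throw e;
        }
    };

This keeps a single log entry per failure instead of one in the main process and another in the renderer.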
@@ -1,17 +1,11 @@
-import { logError } from "../main/log";
 import { keysStore } from "../stores/keys.store";
 import { safeStorageStore } from "../stores/safeStorage.store";
 import { uploadStatusStore } from "../stores/upload.store";
 import { watchStore } from "../stores/watch.store";
 
 export const clearElectronStore = () => {
-    try {
-        uploadStatusStore.clear();
-        keysStore.clear();
-        safeStorageStore.clear();
-        watchStore.clear();
-    } catch (e) {
-        logError(e, "error while clearing electron store");
-        throw e;
-    }
+    uploadStatusStore.clear();
+    keysStore.clear();
+    safeStorageStore.clear();
+    watchStore.clear();
 };
@@ -1,28 +1,16 @@
 import { safeStorage } from "electron/main";
-import { logError } from "../main/log";
 import { safeStorageStore } from "../stores/safeStorage.store";
 
 export async function setEncryptionKey(encryptionKey: string) {
-    try {
-        const encryptedKey: Buffer =
-            await safeStorage.encryptString(encryptionKey);
-        const b64EncryptedKey = Buffer.from(encryptedKey).toString("base64");
-        safeStorageStore.set("encryptionKey", b64EncryptedKey);
-    } catch (e) {
-        logError(e, "setEncryptionKey failed");
-        throw e;
-    }
+    const encryptedKey: Buffer = await safeStorage.encryptString(encryptionKey);
+    const b64EncryptedKey = Buffer.from(encryptedKey).toString("base64");
+    safeStorageStore.set("encryptionKey", b64EncryptedKey);
 }
 
 export async function getEncryptionKey(): Promise<string> {
-    try {
-        const b64EncryptedKey = safeStorageStore.get("encryptionKey");
-        if (b64EncryptedKey) {
-            const keyBuffer = Buffer.from(b64EncryptedKey, "base64");
-            return await safeStorage.decryptString(keyBuffer);
-        }
-    } catch (e) {
-        logError(e, "getEncryptionKey failed");
-        throw e;
-    }
+    const b64EncryptedKey = safeStorageStore.get("encryptionKey");
+    if (b64EncryptedKey) {
+        const keyBuffer = Buffer.from(b64EncryptedKey, "base64");
+        return await safeStorage.decryptString(keyBuffer);
+    }
 }
@@ -1,7 +1,7 @@
 import chokidar from "chokidar";
 import { BrowserWindow } from "electron";
 import path from "path";
-import { logError } from "../main/log";
+import log from "../main/log";
 import { getWatchMappings } from "../services/watch";
 import { getElectronFile } from "./fs";
 
@@ -38,7 +38,7 @@ export function initWatcher(mainWindow: BrowserWindow) {
            );
        })
        .on("error", (error) => {
-           logError(error, "error while watching files");
+           log.error("Error while watching files", error);
        });
 
    return watcher;
@@ -4,7 +4,7 @@ import fs from "node:fs/promises";
 import path from "node:path";
 import { CustomErrors } from "../constants/errors";
 import { writeStream } from "../main/fs";
-import log, { logErrorSentry } from "../main/log";
+import log from "../main/log";
 import { execAsync, isDev } from "../main/util";
 import { Model } from "../types/ipc";
 import Tokenizer from "../utils/clip-bpe-ts/mod";
@@ -78,7 +78,7 @@ async function downloadModel(saveLocation: string, url: string) {
 
 let imageModelDownloadInProgress: Promise<void> = null;
 
-export async function getClipImageModelPath(type: "ggml" | "onnx") {
+const getClipImageModelPath = async (type: "ggml" | "onnx") => {
     try {
         const modelSavePath = getModelSavePath(IMAGE_MODEL_NAME[type]);
         if (imageModelDownloadInProgress) {
@@ -86,7 +86,7 @@ export async function getClipImageModelPath(type: "ggml" | "onnx") {
             await imageModelDownloadInProgress;
         } else {
             if (!existsSync(modelSavePath)) {
-                log.info("clip image model not found, downloading");
+                log.info("CLIP image model not found, downloading");
                 imageModelDownloadInProgress = downloadModel(
                     modelSavePath,
                     IMAGE_MODEL_DOWNLOAD_URL[type],
@@ -96,7 +96,7 @@ export async function getClipImageModelPath(type: "ggml" | "onnx") {
             const localFileSize = (await fs.stat(modelSavePath)).size;
             if (localFileSize !== IMAGE_MODEL_SIZE_IN_BYTES[type]) {
                 log.info(
-                    `clip image model size mismatch, downloading again got: ${localFileSize}`,
+                    `CLIP image model size mismatch, downloading again got: ${localFileSize}`,
                 );
                 imageModelDownloadInProgress = downloadModel(
                     modelSavePath,
@@ -110,21 +110,22 @@ export async function getClipImageModelPath(type: "ggml" | "onnx") {
     } finally {
         imageModelDownloadInProgress = null;
     }
-}
+};
 
 let textModelDownloadInProgress: boolean = false;
 
-export async function getClipTextModelPath(type: "ggml" | "onnx") {
+const getClipTextModelPath = async (type: "ggml" | "onnx") => {
     const modelSavePath = getModelSavePath(TEXT_MODEL_NAME[type]);
     if (textModelDownloadInProgress) {
         throw Error(CustomErrors.MODEL_DOWNLOAD_PENDING);
     } else {
         if (!existsSync(modelSavePath)) {
-            log.info("clip text model not found, downloading");
+            log.info("CLIP text model not found, downloading");
             textModelDownloadInProgress = true;
             downloadModel(modelSavePath, TEXT_MODEL_DOWNLOAD_URL[type])
-                .catch(() => {
-                    // ignore
+                .catch((e) => {
+                    // log but otherwise ignore
+                    log.error("CLIP text model download failed", e);
                 })
                 .finally(() => {
                     textModelDownloadInProgress = false;
@@ -134,12 +135,13 @@ export async function getClipTextModelPath(type: "ggml" | "onnx") {
             const localFileSize = (await fs.stat(modelSavePath)).size;
             if (localFileSize !== TEXT_MODEL_SIZE_IN_BYTES[type]) {
                 log.info(
-                    `clip text model size mismatch, downloading again got: ${localFileSize}`,
+                    `CLIP text model size mismatch, downloading again got: ${localFileSize}`,
                 );
                 textModelDownloadInProgress = true;
                 downloadModel(modelSavePath, TEXT_MODEL_DOWNLOAD_URL[type])
-                    .catch(() => {
-                        // ignore
+                    .catch((e) => {
+                        // log but otherwise ignore
+                        log.error("CLIP text model download failed", e);
                     })
                     .finally(() => {
                         textModelDownloadInProgress = false;
@@ -149,7 +151,7 @@ export async function getClipTextModelPath(type: "ggml" | "onnx") {
         }
     }
     return modelSavePath;
-}
+};
 
 function getGGMLClipPath() {
     return isDev
@@ -254,169 +256,59 @@ async function computeImageEmbedding_(
     }
 }
 
-export async function computeGGMLImageEmbedding(
+const computeGGMLImageEmbedding = async (
     inputFilePath: string,
-): Promise<Float32Array> {
-    try {
-        const clipModelPath = await getClipImageModelPath("ggml");
-        const ggmlclipPath = getGGMLClipPath();
-        const cmd = IMAGE_EMBEDDING_EXTRACT_CMD.map((cmdPart) => {
-            if (cmdPart === GGMLCLIP_PATH_PLACEHOLDER) {
-                return ggmlclipPath;
-            } else if (cmdPart === CLIP_MODEL_PATH_PLACEHOLDER) {
-                return clipModelPath;
-            } else if (cmdPart === INPUT_PATH_PLACEHOLDER) {
-                return inputFilePath;
-            } else {
-                return cmdPart;
-            }
-        });
+): Promise<Float32Array> => {
+    const clipModelPath = await getClipImageModelPath("ggml");
+    const ggmlclipPath = getGGMLClipPath();
+    const cmd = IMAGE_EMBEDDING_EXTRACT_CMD.map((cmdPart) => {
+        if (cmdPart === GGMLCLIP_PATH_PLACEHOLDER) {
+            return ggmlclipPath;
+        } else if (cmdPart === CLIP_MODEL_PATH_PLACEHOLDER) {
+            return clipModelPath;
+        } else if (cmdPart === INPUT_PATH_PLACEHOLDER) {
+            return inputFilePath;
+        } else {
+            return cmdPart;
+        }
+    });
 
-        const { stdout } = await execAsync(cmd);
-        // parse stdout and return embedding
-        // get the last line of stdout
-        const lines = stdout.split("\n");
-        const lastLine = lines[lines.length - 1];
-        const embedding = JSON.parse(lastLine);
-        const embeddingArray = new Float32Array(embedding);
-        return embeddingArray;
-    } catch (err) {
-        log.error("Failed to compute GGML image embedding", err);
-        throw err;
-    }
-}
+    const { stdout } = await execAsync(cmd);
+    // parse stdout and return embedding
+    // get the last line of stdout
+    const lines = stdout.split("\n");
+    const lastLine = lines[lines.length - 1];
+    const embedding = JSON.parse(lastLine);
+    const embeddingArray = new Float32Array(embedding);
+    return embeddingArray;
+};
 
-export async function computeONNXImageEmbedding(
+const computeONNXImageEmbedding = async (
     inputFilePath: string,
-): Promise<Float32Array> {
-    try {
-        const imageSession = await getOnnxImageSession();
-        const t1 = Date.now();
-        const rgbData = await getRGBData(inputFilePath);
-        const feeds = {
-            input: new ort.Tensor("float32", rgbData, [1, 3, 224, 224]),
-        };
-        const t2 = Date.now();
-        const results = await imageSession.run(feeds);
-        log.info(
-            `onnx image embedding time: ${Date.now() - t1} ms (prep:${
-                t2 - t1
-            } ms, extraction: ${Date.now() - t2} ms)`,
-        );
-        const imageEmbedding = results["output"].data; // Float32Array
-        return normalizeEmbedding(imageEmbedding);
-    } catch (err) {
-        log.error("Failed to compute ONNX image embedding", err);
-        throw err;
-    }
-}
-
-export async function computeTextEmbedding(
-    model: Model,
-    text: string,
-): Promise<Float32Array> {
-    try {
-        const embedding = computeTextEmbedding_(model, text);
-        return embedding;
-    } catch (err) {
-        if (isExecError(err)) {
-            const parsedExecError = parseExecError(err);
-            throw Error(parsedExecError);
-        } else {
-            throw err;
-        }
-    }
-}
-
-async function computeTextEmbedding_(
-    model: Model,
-    text: string,
-): Promise<Float32Array> {
-    if (model === Model.GGML_CLIP) {
-        return await computeGGMLTextEmbedding(text);
-    } else {
-        return await computeONNXTextEmbedding(text);
-    }
-}
-
-export async function computeGGMLTextEmbedding(
-    text: string,
-): Promise<Float32Array> {
-    try {
-        const clipModelPath = await getClipTextModelPath("ggml");
-        const ggmlclipPath = getGGMLClipPath();
-        const cmd = TEXT_EMBEDDING_EXTRACT_CMD.map((cmdPart) => {
-            if (cmdPart === GGMLCLIP_PATH_PLACEHOLDER) {
-                return ggmlclipPath;
-            } else if (cmdPart === CLIP_MODEL_PATH_PLACEHOLDER) {
-                return clipModelPath;
-            } else if (cmdPart === INPUT_PATH_PLACEHOLDER) {
-                return text;
-            } else {
-                return cmdPart;
-            }
-        });
-
-        const { stdout } = await execAsync(cmd);
-        // parse stdout and return embedding
-        // get the last line of stdout
-        const lines = stdout.split("\n");
-        const lastLine = lines[lines.length - 1];
-        const embedding = JSON.parse(lastLine);
-        const embeddingArray = new Float32Array(embedding);
-        return embeddingArray;
-    } catch (err) {
-        if (err.message === CustomErrors.MODEL_DOWNLOAD_PENDING) {
-            log.info(CustomErrors.MODEL_DOWNLOAD_PENDING);
-        } else {
-            log.error("Failed to compute GGML text embedding", err);
-        }
-        throw err;
-    }
-}
-
-export async function computeONNXTextEmbedding(
-    text: string,
-): Promise<Float32Array> {
-    try {
-        const imageSession = await getOnnxTextSession();
-        const t1 = Date.now();
-        const tokenizer = getTokenizer();
-        const tokenizedText = Int32Array.from(tokenizer.encodeForCLIP(text));
-        const feeds = {
-            input: new ort.Tensor("int32", tokenizedText, [1, 77]),
-        };
-        const t2 = Date.now();
-        const results = await imageSession.run(feeds);
-        log.info(
-            `onnx text embedding time: ${Date.now() - t1} ms (prep:${
-                t2 - t1
-            } ms, extraction: ${Date.now() - t2} ms)`,
-        );
-        const textEmbedding = results["output"].data; // Float32Array
-        return normalizeEmbedding(textEmbedding);
-    } catch (err) {
-        if (err.message === CustomErrors.MODEL_DOWNLOAD_PENDING) {
-            log.info(CustomErrors.MODEL_DOWNLOAD_PENDING);
-        } else {
-            logErrorSentry(err, "Error in computeONNXTextEmbedding");
-        }
-        throw err;
-    }
-}
+): Promise<Float32Array> => {
+    const imageSession = await getOnnxImageSession();
+    const t1 = Date.now();
+    const rgbData = await getRGBData(inputFilePath);
+    const feeds = {
+        input: new ort.Tensor("float32", rgbData, [1, 3, 224, 224]),
+    };
+    const t2 = Date.now();
+    const results = await imageSession.run(feeds);
+    log.info(
+        `onnx image embedding time: ${Date.now() - t1} ms (prep:${
+            t2 - t1
+        } ms, extraction: ${Date.now() - t2} ms)`,
+    );
+    const imageEmbedding = results["output"].data; // Float32Array
+    return normalizeEmbedding(imageEmbedding);
+};
 
 async function getRGBData(inputFilePath: string) {
     const jpegData = await fs.readFile(inputFilePath);
-    let rawImageData;
-    try {
-        rawImageData = jpeg.decode(jpegData, {
-            useTArray: true,
-            formatAsRGBA: false,
-        });
-    } catch (err) {
-        logErrorSentry(err, "JPEG decode error");
-        throw err;
-    }
+    const rawImageData = jpeg.decode(jpegData, {
+        useTArray: true,
+        formatAsRGBA: false,
+    });
 
     const nx: number = rawImageData.width;
     const ny: number = rawImageData.height;
@@ -479,21 +371,7 @@ async function getRGBData(inputFilePath: string) {
     return result;
 }
 
-export const computeClipMatchScore = async (
-    imageEmbedding: Float32Array,
-    textEmbedding: Float32Array,
-) => {
-    if (imageEmbedding.length !== textEmbedding.length) {
-        throw Error("imageEmbedding and textEmbedding length mismatch");
-    }
-    let score = 0;
-    for (let index = 0; index < imageEmbedding.length; index++) {
-        score += imageEmbedding[index] * textEmbedding[index];
-    }
-    return score;
-};
-
-export const normalizeEmbedding = (embedding: Float32Array) => {
+const normalizeEmbedding = (embedding: Float32Array) => {
     let normalization = 0;
     for (let index = 0; index < embedding.length; index++) {
         normalization += embedding[index] * embedding[index];
@@ -504,3 +382,79 @@ export const normalizeEmbedding = (embedding: Float32Array) => {
     }
     return embedding;
 };
+
+export async function computeTextEmbedding(
+    model: Model,
+    text: string,
+): Promise<Float32Array> {
+    try {
+        const embedding = computeTextEmbedding_(model, text);
+        return embedding;
+    } catch (err) {
+        if (isExecError(err)) {
+            const parsedExecError = parseExecError(err);
+            throw Error(parsedExecError);
+        } else {
+            throw err;
+        }
+    }
+}
+
+async function computeTextEmbedding_(
+    model: Model,
+    text: string,
+): Promise<Float32Array> {
+    if (model === Model.GGML_CLIP) {
+        return await computeGGMLTextEmbedding(text);
+    } else {
+        return await computeONNXTextEmbedding(text);
+    }
+}
+
+export async function computeGGMLTextEmbedding(
+    text: string,
+): Promise<Float32Array> {
+    const clipModelPath = await getClipTextModelPath("ggml");
+    const ggmlclipPath = getGGMLClipPath();
+    const cmd = TEXT_EMBEDDING_EXTRACT_CMD.map((cmdPart) => {
+        if (cmdPart === GGMLCLIP_PATH_PLACEHOLDER) {
+            return ggmlclipPath;
+        } else if (cmdPart === CLIP_MODEL_PATH_PLACEHOLDER) {
+            return clipModelPath;
+        } else if (cmdPart === INPUT_PATH_PLACEHOLDER) {
+            return text;
+        } else {
+            return cmdPart;
+        }
+    });
+
+    const { stdout } = await execAsync(cmd);
+    // parse stdout and return embedding
+    // get the last line of stdout
+    const lines = stdout.split("\n");
+    const lastLine = lines[lines.length - 1];
+    const embedding = JSON.parse(lastLine);
+    const embeddingArray = new Float32Array(embedding);
+    return embeddingArray;
+}
+
+export async function computeONNXTextEmbedding(
+    text: string,
+): Promise<Float32Array> {
+    const imageSession = await getOnnxTextSession();
+    const t1 = Date.now();
+    const tokenizer = getTokenizer();
+    const tokenizedText = Int32Array.from(tokenizer.encodeForCLIP(text));
+    const feeds = {
+        input: new ort.Tensor("int32", tokenizedText, [1, 77]),
+    };
+    const t2 = Date.now();
+    const results = await imageSession.run(feeds);
+    log.info(
+        `onnx text embedding time: ${Date.now() - t1} ms (prep:${
+            t2 - t1
+        } ms, extraction: ${Date.now() - t2} ms)`,
+    );
+    const textEmbedding = results["output"].data; // Float32Array
+    return normalizeEmbedding(textEmbedding);
+}