@@ -7,11 +7,10 @@ from typing import Any, Literal
 
 import numpy as np
 import onnxruntime as ort
-from huggingface_hub import snapshot_download
 from PIL import Image
 from transformers import AutoTokenizer
 
-from app.config import log
+from app.config import clean_name, log
 from app.models.transforms import crop, get_pil_resampling, normalize, resize, to_numpy
 from app.schemas import ModelType, ndarray_f32, ndarray_i32, ndarray_i64
 
@@ -117,15 +116,7 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
         mode: Literal["text", "vision"] | None = None,
         **model_kwargs: Any,
     ) -> None:
-        super().__init__(_clean_model_name(model_name), cache_dir, mode, **model_kwargs)
-
-    def _download(self) -> None:
-        snapshot_download(
-            f"immich-app/{self.model_name}",
-            cache_dir=self.cache_dir,
-            local_dir=self.cache_dir,
-            local_dir_use_symlinks=False,
-        )
+        super().__init__(clean_name(model_name), cache_dir, mode, **model_kwargs)
 
     def _load(self) -> None:
         super()._load()
@@ -171,52 +162,3 @@ class MCLIPEncoder(OpenCLIPEncoder):
     def tokenize(self, text: str) -> dict[str, ndarray_i32]:
         tokens: dict[str, ndarray_i64] = self.tokenizer(text, return_tensors="np")
         return {k: v.astype(np.int32) for k, v in tokens.items()}
-
-
-_OPENCLIP_MODELS = {
-    "RN50__openai",
-    "RN50__yfcc15m",
-    "RN50__cc12m",
-    "RN101__openai",
-    "RN101__yfcc15m",
-    "RN50x4__openai",
-    "RN50x16__openai",
-    "RN50x64__openai",
-    "ViT-B-32__openai",
-    "ViT-B-32__laion2b_e16",
-    "ViT-B-32__laion400m_e31",
-    "ViT-B-32__laion400m_e32",
-    "ViT-B-32__laion2b-s34b-b79k",
-    "ViT-B-16__openai",
-    "ViT-B-16__laion400m_e31",
-    "ViT-B-16__laion400m_e32",
-    "ViT-B-16-plus-240__laion400m_e31",
-    "ViT-B-16-plus-240__laion400m_e32",
-    "ViT-L-14__openai",
-    "ViT-L-14__laion400m_e31",
-    "ViT-L-14__laion400m_e32",
-    "ViT-L-14__laion2b-s32b-b82k",
-    "ViT-L-14-336__openai",
-    "ViT-H-14__laion2b-s32b-b79k",
-    "ViT-g-14__laion2b-s12b-b42k",
-}
-
-
-_MCLIP_MODELS = {
-    "LABSE-Vit-L-14",
-    "XLM-Roberta-Large-Vit-B-32",
-    "XLM-Roberta-Large-Vit-B-16Plus",
-    "XLM-Roberta-Large-Vit-L-14",
-}
-
-
-def _clean_model_name(model_name: str) -> str:
-    return model_name.split("/")[-1].replace("::", "__")
-
-
-def is_openclip(model_name: str) -> bool:
-    return _clean_model_name(model_name) in _OPENCLIP_MODELS
-
-
-def is_mclip(model_name: str) -> bool:
-    return _clean_model_name(model_name) in _MCLIP_MODELS
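
Note (not part of the diff): the constructor now calls clean_name from app.config in place of the removed module-level _clean_model_name helper. A minimal sketch of the assumed equivalent behavior is shown below for reference only; the actual implementation lives in app/config.py and is not included in this hunk.

    def clean_name(model_name: str) -> str:
        # Assumed to mirror the removed _clean_model_name: drop any repo prefix
        # and normalize the "::" pretrained-tag separator to "__".
        return model_name.split("/")[-1].replace("::", "__")

    # Example: clean_name("immich-app/ViT-B-32::openai") -> "ViT-B-32__openai"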