run.py

import gc
import os
from pathlib import Path
from tempfile import TemporaryDirectory

from huggingface_hub import create_repo, login, upload_folder
from models import mclip, openclip, tfclip
from rich.progress import Progress

# Models to export: OpenCLIP weights are named "<architecture>::<pretrained>";
# the remaining entries are Hugging Face model IDs.
models = [
    "RN50::openai",
    "RN50::yfcc15m",
    "RN50::cc12m",
    "RN101::openai",
    "RN101::yfcc15m",
    "RN50x4::openai",
    "RN50x16::openai",
    "RN50x64::openai",
    "ViT-B-32::openai",
    "ViT-B-32::laion2b_e16",
    "ViT-B-32::laion400m_e31",
    "ViT-B-32::laion400m_e32",
    "ViT-B-32::laion2b-s34b-b79k",
    "ViT-B-16::openai",
    "ViT-B-16::laion400m_e31",
    "ViT-B-16::laion400m_e32",
    "ViT-B-16-plus-240::laion400m_e31",
    "ViT-B-16-plus-240::laion400m_e32",
    "ViT-L-14::openai",
    "ViT-L-14::laion400m_e31",
    "ViT-L-14::laion400m_e32",
    "ViT-L-14::laion2b-s32b-b82k",
    "ViT-L-14-336::openai",
    "ViT-H-14::laion2b-s32b-b79k",
    "ViT-g-14::laion2b-s12b-b42k",
    "M-CLIP/LABSE-Vit-L-14",
    "M-CLIP/XLM-Roberta-Large-Vit-B-32",
    "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
    "M-CLIP/XLM-Roberta-Large-Vit-L-14",
    "openai/clip-vit-base-patch32",
]

# login(token=os.environ["HF_AUTH_TOKEN"])

with Progress() as progress:
    task1 = progress.add_task("[green]Exporting models...", total=len(models))
    task2 = progress.add_task("[yellow]Uploading models...", total=len(models))

    with TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        for model in models:
            # "ViT-B-32::laion2b_e16" -> "ViT-B-32__laion2b_e16"; "M-CLIP/LABSE-Vit-L-14" -> "LABSE-Vit-L-14"
            model_name = model.split("/")[-1].replace("::", "__")
            config_path = tmpdir / model_name / "config.json"

            def upload() -> None:
                progress.update(task2, description=f"[yellow]Uploading {model_name}")
                # Each model gets its own repo under the immich-app organization.
                repo_id = f"immich-app/{model_name}"
                create_repo(repo_id, exist_ok=True)
                upload_folder(repo_id=repo_id, folder_path=tmpdir / model_name)
                progress.update(task2, advance=1)

            def export() -> None:
                progress.update(task1, description=f"[green]Exporting {model_name}")
                visual_dir = tmpdir / model_name / "visual"
                textual_dir = tmpdir / model_name / "textual"
                # M-CLIP models go through their own ONNX exporter, other Hugging Face
                # IDs through the TFLite exporter, and plain OpenCLIP names through
                # the OpenCLIP ONNX exporter.
                if model.startswith("M-CLIP"):
                    mclip.to_onnx(model, visual_dir, textual_dir)
                elif "/" in model:
                    tfclip.to_tflite(model, visual_dir.as_posix(), textual_dir.as_posix())
                else:
                    name, _, pretrained = model_name.partition("__")
                    openclip.to_onnx(openclip.OpenCLIPModelConfig(name, pretrained), visual_dir, textual_dir)
                progress.update(task1, advance=1)
                gc.collect()

            export()
            upload()
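
Each upload above lands in a Hugging Face repo named immich-app/<model_name>, where <model_name> is the model string with any namespace prefix dropped and "::" replaced by "__". As a minimal sketch of the consumer side (assuming a given repo has already been published by a run of this script and is publicly readable), huggingface_hub.snapshot_download can pull one back down:

from huggingface_hub import snapshot_download

# Same naming rule as run.py: drop any "org/" prefix and replace "::" with "__".
model = "ViT-B-32::laion2b-s34b-b79k"
model_name = model.split("/")[-1].replace("::", "__")

# Fetch the exported files (the visual/ and textual/ folders) into the local cache.
# Assumes the repo exists and is public; a private repo would also need a token.
local_dir = snapshot_download(repo_id=f"immich-app/{model_name}")
print(local_dir)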