# run.py — export CLIP models to ONNX and upload them to the Hugging Face Hub.
import gc
import os
from pathlib import Path
from tempfile import TemporaryDirectory

from huggingface_hub import create_repo, login, upload_folder
from models import mclip, openclip
from rich.progress import Progress

# Models to export and upload, in two naming schemes:
#   * "<arch>::<pretrained>"  — OpenCLIP checkpoints; the "::" separator is
#     split apart further down to recover the architecture and weights tag.
#   * "M-CLIP/<name>"         — multilingual CLIP models, exported via `mclip`.
models = [
    "RN50::openai",
    "RN50::yfcc15m",
    "RN50::cc12m",
    "RN101::openai",
    "RN101::yfcc15m",
    "RN50x4::openai",
    "RN50x16::openai",
    "RN50x64::openai",
    "ViT-B-32::openai",
    "ViT-B-32::laion2b_e16",
    "ViT-B-32::laion400m_e31",
    "ViT-B-32::laion400m_e32",
    "ViT-B-32::laion2b-s34b-b79k",
    "ViT-B-16::openai",
    "ViT-B-16::laion400m_e31",
    "ViT-B-16::laion400m_e32",
    "ViT-B-16-plus-240::laion400m_e31",
    "ViT-B-16-plus-240::laion400m_e32",
    "ViT-L-14::openai",
    "ViT-L-14::laion400m_e31",
    "ViT-L-14::laion400m_e32",
    "ViT-L-14::laion2b-s32b-b82k",
    "ViT-L-14-336::openai",
    "ViT-H-14::laion2b-s32b-b79k",
    "ViT-g-14::laion2b-s12b-b42k",
    "M-CLIP/LABSE-Vit-L-14",
    "M-CLIP/XLM-Roberta-Large-Vit-B-32",
    "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
    "M-CLIP/XLM-Roberta-Large-Vit-L-14",
]
  39. login(token=os.environ["HF_AUTH_TOKEN"])
  40. with Progress() as progress:
  41. task1 = progress.add_task("[green]Exporting models...", total=len(models))
  42. task2 = progress.add_task("[yellow]Uploading models...", total=len(models))
  43. with TemporaryDirectory() as tmp:
  44. tmpdir = Path(tmp)
  45. for model in models:
  46. model_name = model.split("/")[-1].replace("::", "__")
  47. config_path = tmpdir / model_name / "config.json"
  48. def upload() -> None:
  49. progress.update(task2, description=f"[yellow]Uploading {model_name}")
  50. repo_id = f"immich-app/{model_name}"
  51. create_repo(repo_id, exist_ok=True)
  52. upload_folder(repo_id=repo_id, folder_path=tmpdir / model_name)
  53. progress.update(task2, advance=1)
  54. def export() -> None:
  55. progress.update(task1, description=f"[green]Exporting {model_name}")
  56. visual_dir = tmpdir / model_name / "visual"
  57. textual_dir = tmpdir / model_name / "textual"
  58. if model.startswith("M-CLIP"):
  59. mclip.to_onnx(model, visual_dir, textual_dir)
  60. else:
  61. name, _, pretrained = model_name.partition("__")
  62. openclip.to_onnx(openclip.OpenCLIPModelConfig(name, pretrained), visual_dir, textual_dir)
  63. progress.update(task1, advance=1)
  64. gc.collect()
  65. export()
  66. upload()