main.py

import os
from io import BytesIO
from typing import Any

import cv2
import numpy as np
import uvicorn
from fastapi import Body, Depends, FastAPI
from PIL import Image

from .config import settings
from .models.base import InferenceModel
from .models.cache import ModelCache
from .schemas import (
    EmbeddingResponse,
    FaceResponse,
    MessageResponse,
    ModelType,
    TagResponse,
    TextModelRequest,
    TextResponse,
)

app = FastAPI()
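

# On startup, build the TTL-based model cache and either load every model
# into it now or just instantiate each one so its files are fetched,
# depending on settings.eager_startup.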
@app.on_event("startup")
async def startup_event() -> None:
    app.state.model_cache = ModelCache(ttl=settings.model_ttl, revalidate=True)
    models = [
        (settings.classification_model, ModelType.IMAGE_CLASSIFICATION),
        (settings.clip_image_model, ModelType.CLIP),
        (settings.clip_text_model, ModelType.CLIP),
        (settings.facial_recognition_model, ModelType.FACIAL_RECOGNITION),
    ]

    # Get all models
    for model_name, model_type in models:
        if settings.eager_startup:
            await app.state.model_cache.get(model_name, model_type)
        else:
            InferenceModel.from_model_type(model_type, model_name)
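

# Body dependencies: each endpoint receives the raw image bytes and decodes
# them into a PIL image or an OpenCV matrix before inference.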
def dep_pil_image(byte_image: bytes = Body(...)) -> Image.Image:
    return Image.open(BytesIO(byte_image))


def dep_cv_image(byte_image: bytes = Body(...)) -> cv2.Mat:
    byte_image_np = np.frombuffer(byte_image, np.uint8)
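    # IMREAD_COLOR decodes the buffer into a BGR matrix, OpenCV's native order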
    return cv2.imdecode(byte_image_np, cv2.IMREAD_COLOR)
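

# Basic liveness endpoints for verifying the service is up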
@app.get("/", response_model=MessageResponse)
async def root() -> dict[str, str]:
    return {"message": "Immich ML"}


@app.get("/ping", response_model=TextResponse)
def ping() -> str:
    return "pong"
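

# Each inference endpoint checks its model out of the TTL cache (loading it
# on a miss) and runs prediction on the decoded payload. A request might look
# like this (hypothetical port; the real value comes from settings.port):
#   curl -X POST http://localhost:3003/image-classifier/tag-image \
#        -H 'Content-Type: application/octet-stream' --data-binary @photo.jpg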
@app.post(
    "/image-classifier/tag-image",
    response_model=TagResponse,
    status_code=200,
)
async def image_classification(
    image: Image.Image = Depends(dep_pil_image),
) -> list[str]:
    model = await app.state.model_cache.get(
        settings.classification_model, ModelType.IMAGE_CLASSIFICATION
    )
    labels = model.predict(image)
    return labels
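

# The CLIP image and text encoders map both modalities into a shared
# embedding space, which is what makes text-to-image search possible.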
@app.post(
    "/sentence-transformer/encode-image",
    response_model=EmbeddingResponse,
    status_code=200,
)
async def clip_encode_image(
    image: Image.Image = Depends(dep_pil_image),
) -> list[float]:
    model = await app.state.model_cache.get(settings.clip_image_model, ModelType.CLIP)
    embedding = model.predict(image)
    return embedding


@app.post(
    "/sentence-transformer/encode-text",
    response_model=EmbeddingResponse,
    status_code=200,
)
async def clip_encode_text(payload: TextModelRequest) -> list[float]:
    model = await app.state.model_cache.get(settings.clip_text_model, ModelType.CLIP)
    embedding = model.predict(payload.text)
    return embedding
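

# Face detection consumes the OpenCV matrix and returns one dictionary per
# detected face.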
@app.post(
    "/facial-recognition/detect-faces",
    response_model=FaceResponse,
    status_code=200,
)
async def facial_recognition(
    image: cv2.Mat = Depends(dep_cv_image),
) -> list[dict[str, Any]]:
    model = await app.state.model_cache.get(
        settings.facial_recognition_model, ModelType.FACIAL_RECOGNITION
    )
    faces = model.predict(image)
    return faces
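

# Local entrypoint: enable auto-reload in development (NODE_ENV=development);
# otherwise serve with the configured worker count.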
if __name__ == "__main__":
    is_dev = os.getenv("NODE_ENV") == "development"
    uvicorn.run(
        "app.main:app",
        host=settings.host,
        port=settings.port,
        reload=is_dev,
        workers=settings.workers,
    )