test_main.py 9.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239
  1. import json
  2. import pickle
  3. from io import BytesIO
  4. from typing import Any, TypeAlias
  5. from unittest import mock
  6. import cv2
  7. import numpy as np
  8. import pytest
  9. from fastapi.testclient import TestClient
  10. from PIL import Image
  11. from pytest_mock import MockerFixture
  12. from .config import settings
  13. from .models.base import PicklableSessionOptions
  14. from .models.cache import ModelCache
  15. from .models.clip import CLIPEncoder
  16. from .models.facial_recognition import FaceRecognizer
  17. from .models.image_classification import ImageClassifier
  18. from .schemas import ModelType
# Convenience alias for float32 numpy arrays; not referenced in the visible
# tests — presumably kept for annotating fixture/helper signatures. TODO confirm.
ndarray: TypeAlias = np.ndarray[int, np.dtype[np.float32]]
  20. class TestImageClassifier:
  21. classifier_preds = [
  22. {"label": "that's an image alright", "score": 0.8},
  23. {"label": "well it ends with .jpg", "score": 0.1},
  24. {"label": "idk, im just seeing bytes", "score": 0.05},
  25. {"label": "not sure", "score": 0.04},
  26. {"label": "probably a virus", "score": 0.01},
  27. ]
  28. def test_min_score(self, pil_image: Image.Image, mocker: MockerFixture) -> None:
  29. mocker.patch.object(ImageClassifier, "load")
  30. classifier = ImageClassifier("test_model_name", min_score=0.0)
  31. assert classifier.min_score == 0.0
  32. classifier.model = mock.Mock()
  33. classifier.model.return_value = self.classifier_preds
  34. all_labels = classifier.predict(pil_image)
  35. classifier.min_score = 0.5
  36. filtered_labels = classifier.predict(pil_image)
  37. assert all_labels == [
  38. "that's an image alright",
  39. "well it ends with .jpg",
  40. "idk",
  41. "im just seeing bytes",
  42. "not sure",
  43. "probably a virus",
  44. ]
  45. assert filtered_labels == ["that's an image alright"]
  46. class TestCLIP:
  47. embedding = np.random.rand(512).astype(np.float32)
  48. def test_basic_image(self, pil_image: Image.Image, mocker: MockerFixture) -> None:
  49. mocker.patch.object(CLIPEncoder, "download")
  50. mocked = mocker.patch("app.models.clip.ort.InferenceSession", autospec=True)
  51. mocked.return_value.run.return_value = [[self.embedding]]
  52. clip_encoder = CLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="vision")
  53. assert clip_encoder.mode == "vision"
  54. embedding = clip_encoder.predict(pil_image)
  55. assert isinstance(embedding, list)
  56. assert len(embedding) == 512
  57. assert all([isinstance(num, float) for num in embedding])
  58. clip_encoder.vision_model.run.assert_called_once()
  59. def test_basic_text(self, mocker: MockerFixture) -> None:
  60. mocker.patch.object(CLIPEncoder, "download")
  61. mocked = mocker.patch("app.models.clip.ort.InferenceSession", autospec=True)
  62. mocked.return_value.run.return_value = [[self.embedding]]
  63. clip_encoder = CLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="text")
  64. assert clip_encoder.mode == "text"
  65. embedding = clip_encoder.predict("test search query")
  66. assert isinstance(embedding, list)
  67. assert len(embedding) == 512
  68. assert all([isinstance(num, float) for num in embedding])
  69. clip_encoder.text_model.run.assert_called_once()
  70. class TestFaceRecognition:
  71. def test_set_min_score(self, mocker: MockerFixture) -> None:
  72. mocker.patch.object(FaceRecognizer, "load")
  73. face_recognizer = FaceRecognizer("test_model_name", cache_dir="test_cache", min_score=0.5)
  74. assert face_recognizer.min_score == 0.5
  75. def test_basic(self, cv_image: cv2.Mat, mocker: MockerFixture) -> None:
  76. mocker.patch.object(FaceRecognizer, "load")
  77. face_recognizer = FaceRecognizer("test_model_name", min_score=0.0, cache_dir="test_cache")
  78. det_model = mock.Mock()
  79. num_faces = 2
  80. bbox = np.random.rand(num_faces, 4).astype(np.float32)
  81. score = np.array([[0.67]] * num_faces).astype(np.float32)
  82. kpss = np.random.rand(num_faces, 5, 2).astype(np.float32)
  83. det_model.detect.return_value = (np.concatenate([bbox, score], axis=-1), kpss)
  84. face_recognizer.det_model = det_model
  85. rec_model = mock.Mock()
  86. embedding = np.random.rand(num_faces, 512).astype(np.float32)
  87. rec_model.get_feat.return_value = embedding
  88. face_recognizer.rec_model = rec_model
  89. faces = face_recognizer.predict(cv_image)
  90. assert len(faces) == num_faces
  91. for face in faces:
  92. assert face["imageHeight"] == 800
  93. assert face["imageWidth"] == 600
  94. assert isinstance(face["embedding"], list)
  95. assert len(face["embedding"]) == 512
  96. assert all([isinstance(num, float) for num in face["embedding"]])
  97. det_model.detect.assert_called_once()
  98. assert rec_model.get_feat.call_count == num_faces
  99. @pytest.mark.asyncio
  100. class TestCache:
  101. async def test_caches(self, mock_get_model: mock.Mock) -> None:
  102. model_cache = ModelCache()
  103. await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
  104. await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
  105. assert len(model_cache.cache._cache) == 1
  106. mock_get_model.assert_called_once()
  107. async def test_kwargs_used(self, mock_get_model: mock.Mock) -> None:
  108. model_cache = ModelCache()
  109. await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION, cache_dir="test_cache")
  110. mock_get_model.assert_called_once_with(
  111. ModelType.IMAGE_CLASSIFICATION, "test_model_name", cache_dir="test_cache"
  112. )
  113. async def test_different_clip(self, mock_get_model: mock.Mock) -> None:
  114. model_cache = ModelCache()
  115. await model_cache.get("test_image_model_name", ModelType.CLIP)
  116. await model_cache.get("test_text_model_name", ModelType.CLIP)
  117. mock_get_model.assert_has_calls(
  118. [
  119. mock.call(ModelType.CLIP, "test_image_model_name"),
  120. mock.call(ModelType.CLIP, "test_text_model_name"),
  121. ]
  122. )
  123. assert len(model_cache.cache._cache) == 2
  124. @mock.patch("app.models.cache.OptimisticLock", autospec=True)
  125. async def test_model_ttl(self, mock_lock_cls: mock.Mock, mock_get_model: mock.Mock) -> None:
  126. model_cache = ModelCache(ttl=100)
  127. await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
  128. mock_lock_cls.return_value.__aenter__.return_value.cas.assert_called_with(mock.ANY, ttl=100)
  129. @mock.patch("app.models.cache.SimpleMemoryCache.expire")
  130. async def test_revalidate(self, mock_cache_expire: mock.Mock, mock_get_model: mock.Mock) -> None:
  131. model_cache = ModelCache(ttl=100, revalidate=True)
  132. await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
  133. await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
  134. mock_cache_expire.assert_called_once_with(mock.ANY, 100)
  135. @pytest.mark.skipif(
  136. not settings.test_full,
  137. reason="More time-consuming since it deploys the app and loads models.",
  138. )
  139. class TestEndpoints:
  140. def test_tagging_endpoint(
  141. self, pil_image: Image.Image, responses: dict[str, Any], deployed_app: TestClient
  142. ) -> None:
  143. byte_image = BytesIO()
  144. pil_image.save(byte_image, format="jpeg")
  145. response = deployed_app.post(
  146. "http://localhost:3003/predict",
  147. data={
  148. "modelName": "microsoft/resnet-50",
  149. "modelType": "image-classification",
  150. "options": json.dumps({"minScore": 0.0}),
  151. },
  152. files={"image": byte_image.getvalue()},
  153. )
  154. assert response.status_code == 200
  155. assert response.json() == responses["image-classification"]
  156. def test_clip_image_endpoint(
  157. self, pil_image: Image.Image, responses: dict[str, Any], deployed_app: TestClient
  158. ) -> None:
  159. byte_image = BytesIO()
  160. pil_image.save(byte_image, format="jpeg")
  161. response = deployed_app.post(
  162. "http://localhost:3003/predict",
  163. data={"modelName": "ViT-B-32::openai", "modelType": "clip", "options": json.dumps({"mode": "vision"})},
  164. files={"image": byte_image.getvalue()},
  165. )
  166. assert response.status_code == 200
  167. assert response.json() == responses["clip"]["image"]
  168. def test_clip_text_endpoint(self, responses: dict[str, Any], deployed_app: TestClient) -> None:
  169. response = deployed_app.post(
  170. "http://localhost:3003/predict",
  171. data={
  172. "modelName": "ViT-B-32::openai",
  173. "modelType": "clip",
  174. "text": "test search query",
  175. "options": json.dumps({"mode": "text"}),
  176. },
  177. )
  178. assert response.status_code == 200
  179. assert response.json() == responses["clip"]["text"]
  180. def test_face_endpoint(self, pil_image: Image.Image, responses: dict[str, Any], deployed_app: TestClient) -> None:
  181. byte_image = BytesIO()
  182. pil_image.save(byte_image, format="jpeg")
  183. response = deployed_app.post(
  184. "http://localhost:3003/predict",
  185. data={
  186. "modelName": "buffalo_l",
  187. "modelType": "facial-recognition",
  188. "options": json.dumps({"minScore": 0.034}),
  189. },
  190. files={"image": byte_image.getvalue()},
  191. )
  192. assert response.status_code == 200
  193. assert response.json() == responses["facial-recognition"]
  194. def test_sess_options() -> None:
  195. sess_options = PicklableSessionOptions()
  196. sess_options.intra_op_num_threads = 1
  197. sess_options.inter_op_num_threads = 1
  198. pickled = pickle.dumps(sess_options)
  199. unpickled = pickle.loads(pickled)
  200. assert unpickled.intra_op_num_threads == 1
  201. assert unpickled.inter_op_num_threads == 1