locustfile.py

import json
from argparse import ArgumentParser
from io import BytesIO
from typing import Any

from locust import HttpUser, events, task
from locust.env import Environment
from PIL import Image
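
# Load tests for an inference server exposing a multipart /predict endpoint.
# Typical invocations (standard locust CLI; the class names are the scenarios
# defined below):
#   locust -f locustfile.py
#   locust -f locustfile.py --headless -u 32 -r 8 --run-time 60s CLIPVisionFormDataLoadTest
# Naming one or more user classes runs only those scenarios; otherwise locust
# runs every non-abstract user class in this file.
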
# Shared JPEG payload, generated once per worker process at test start.
byte_image = BytesIO()


@events.init_command_line_parser.add_listener
def _(parser: ArgumentParser) -> None:
    parser.add_argument("--tag-model", type=str, default="microsoft/resnet-50")
    parser.add_argument("--clip-model", type=str, default="ViT-B-32::openai")
    parser.add_argument("--face-model", type=str, default="buffalo_l")
    parser.add_argument(
        "--tag-min-score",
        type=float,
        default=0.0,
        help="Returns all tags at or above this score. The default returns all tags.",
    )
    parser.add_argument(
        "--face-min-score",
        type=float,
        default=0.034,
        help=(
            "Returns all faces at or above this score. The default returns 1 face per request; "
            "setting this to 0 blows up the number of faces to the thousands."
        ),
    )
    parser.add_argument("--image-size", type=int, default=1000)
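
# These options are registered through locust's init_command_line_parser hook,
# so they are passed on the locust command line alongside the built-in flags,
# e.g. locust -f locustfile.py --image-size 512 --face-min-score 0.1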


@events.test_start.add_listener
def on_test_start(environment: Environment, **kwargs: Any) -> None:
    global byte_image
    assert environment.parsed_options is not None
    # Encode a single solid-color JPEG of the requested size up front so every
    # request reuses the same bytes instead of re-encoding an image per call.
    size = environment.parsed_options.image_size
    image = Image.new("RGB", (size, size))
    byte_image = BytesIO()
    image.save(byte_image, format="jpeg")


class InferenceLoadTest(HttpUser):
    abstract: bool = True
    host = "http://127.0.0.1:3003"
    data: bytes
    headers: dict[str, str] = {"Content-Type": "image/jpg"}

    # Re-use the pre-encoded image bytes across all user instances in a process.
    def on_start(self) -> None:
        self.data = byte_image.getvalue()
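

# Image classification: uploads the shared JPEG and filters the returned tags
# by --tag-min-score.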
class ClassificationFormDataLoadTest(InferenceLoadTest):
    @task
    def classify(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.tag_model),
            ("modelType", "image-classification"),
            ("options", json.dumps({"minScore": self.environment.parsed_options.tag_min_score})),
        ]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)
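

# CLIP text encoding: form fields only, no image upload.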
class CLIPTextFormDataLoadTest(InferenceLoadTest):
    @task
    def encode_text(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.clip_model),
            ("modelType", "clip"),
            ("options", json.dumps({"mode": "text"})),
            ("text", "test search query"),
        ]
        self.client.post("/predict", data=data)
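

# CLIP vision encoding: uploads the shared JPEG and requests an image embedding.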
class CLIPVisionFormDataLoadTest(InferenceLoadTest):
    @task
    def encode_image(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.clip_model),
            ("modelType", "clip"),
            ("options", json.dumps({"mode": "vision"})),
        ]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)
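

# Facial recognition: uploads the shared JPEG; --face-min-score bounds the
# number of faces returned per request.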
class RecognitionFormDataLoadTest(InferenceLoadTest):
    @task
    def recognize(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.face_model),
            ("modelType", "facial-recognition"),
            ("options", json.dumps({"minScore": self.environment.parsed_options.face_min_score})),
        ]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)