locustfile.py

import json
from argparse import ArgumentParser
from io import BytesIO
from typing import Any

from locust import HttpUser, events, task
from locust.env import Environment
from PIL import Image

# Shared JPEG payload, populated once per worker process in on_test_start.
byte_image = BytesIO()
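

# Register extra CLI options so model names, score thresholds, and the generated
# image size can be tuned per run.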
@events.init_command_line_parser.add_listener
def _(parser: ArgumentParser) -> None:
    parser.add_argument("--tag-model", type=str, default="microsoft/resnet-50")
    parser.add_argument("--clip-model", type=str, default="ViT-B-32::openai")
    parser.add_argument("--face-model", type=str, default="buffalo_l")
    parser.add_argument(
        "--tag-min-score",
        type=float,
        default=0.0,
        help="Returns all tags at or above this score. The default returns all tags.",
    )
    parser.add_argument(
        "--face-min-score",
        type=float,
        default=0.034,
        help=(
            "Returns all faces at or above this score. The default returns 1 face per request; "
            "setting this to 0 blows up the number of faces to the thousands."
        ),
    )
    parser.add_argument("--image-size", type=int, default=1000)
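

# Build one in-memory JPEG of the requested size when the test starts; every
# simulated user reuses this payload instead of encoding a new image per request.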
@events.test_start.add_listener
def on_test_start(environment: Environment, **kwargs: Any) -> None:
    global byte_image
    assert environment.parsed_options is not None
    image = Image.new("RGB", (environment.parsed_options.image_size, environment.parsed_options.image_size))
    byte_image = BytesIO()
    image.save(byte_image, format="jpeg")
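

# Abstract base user: pins the target host and exposes the shared image bytes to
# the concrete load tests below.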
class InferenceLoadTest(HttpUser):
    abstract: bool = True
    host = "http://127.0.0.1:3003"
    data: bytes
    headers: dict[str, str] = {"Content-Type": "image/jpg"}

    # re-use the image across all instances in a process
    def on_start(self) -> None:
        global byte_image
        self.data = byte_image.getvalue()
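

# Image-classification (tagging) request: multipart form with the shared image.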
class ClassificationFormDataLoadTest(InferenceLoadTest):
    @task
    def classify(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.tag_model),
            ("modelType", "image-classification"),
            ("options", json.dumps({"minScore": self.environment.parsed_options.tag_min_score})),
        ]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)
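

# CLIP text encoding: form fields only, no image payload.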
class CLIPTextFormDataLoadTest(InferenceLoadTest):
    @task
    def encode_text(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.clip_model),
            ("modelType", "clip"),
            ("options", json.dumps({"mode": "text"})),
            ("text", "test search query"),
        ]
        self.client.post("/predict", data=data)
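

# CLIP image encoding: same multipart layout as classification, but in vision mode.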
class CLIPVisionFormDataLoadTest(InferenceLoadTest):
    @task
    def encode_image(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.clip_model),
            ("modelType", "clip"),
            ("options", json.dumps({"mode": "vision"})),
        ]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)
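

# Facial recognition: --face-min-score controls how many detected faces are returned.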
class RecognitionFormDataLoadTest(InferenceLoadTest):
    @task
    def recognize(self) -> None:
        data = [
            ("modelName", self.environment.parsed_options.face_model),
            ("modelType", "facial-recognition"),
            ("options", json.dumps({"minScore": self.environment.parsed_options.face_min_score})),
        ]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)
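

# Example invocation (a sketch; assumes Locust 2.x and that the inference server is
# reachable at the host set on InferenceLoadTest):
#   locust -f locustfile.py CLIPVisionFormDataLoadTest --headless -u 8 -r 2 --image-size 512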