load_test.sh

#!/usr/bin/env bash

export MACHINE_LEARNING_CACHE_FOLDER=/tmp/model_cache
export MACHINE_LEARNING_MIN_FACE_SCORE=0.034 # returns 1 face per request; setting this to 0 blows up the number of faces into the thousands
export MACHINE_LEARNING_MIN_TAG_SCORE=0.0
export PID_FILE=/tmp/locust_pid
export LOG_FILE=/tmp/gunicorn.log
export HEADLESS=false
export HOST=127.0.0.1:3003
export CONCURRENCY=4
export NUM_ENDPOINTS=3
export PYTHONPATH=app

# Start the server as a daemon and poll its log until the models have loaded.
gunicorn app.main:app --worker-class uvicorn.workers.UvicornWorker \
    --bind "$HOST" --daemon --error-logfile "$LOG_FILE" --pid "$PID_FILE"
while true; do
    echo "Loading models..."
    sleep 5
    if grep -q "startup complete" "$LOG_FILE"; then break; fi
done

# "Users" are assigned only one task (see the locustfile sketch below),
# so multiply concurrency by the number of tasks.
locust --host "http://$HOST" --web-host 127.0.0.1 \
    --run-time 120s --users $((CONCURRENCY * NUM_ENDPOINTS)) $(if $HEADLESS; then echo "--headless"; fi)

# Shut down the daemonized gunicorn once the load test exits.
if [[ -e $PID_FILE ]]; then kill "$(cat "$PID_FILE")"; fi
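The load itself comes from whatever locustfile Locust picks up in the working directory, which isn't included here. As a rough illustration of why --users is set to CONCURRENCY * NUM_ENDPOINTS, below is a minimal, hypothetical locustfile sketch in which each user class runs exactly one task against one endpoint, so spawning 4 * 3 = 12 users works out to roughly 4 concurrent users per endpoint. The class names, endpoint paths, and payloads are placeholders, not the actual locustfile used with this script.

# locustfile.py -- hypothetical sketch; endpoint paths, payloads, and class names
# are placeholders rather than the real locustfile used for this test.
from locust import HttpUser, constant, task


class TagImageUser(HttpUser):
    # One task per user class, so each spawned user only ever hits one endpoint.
    wait_time = constant(0)

    @task
    def tag_image(self):
        self.client.post("/image-classifier/tag-image", json={"image": "placeholder"})


class DetectFacesUser(HttpUser):
    wait_time = constant(0)

    @task
    def detect_faces(self):
        self.client.post("/facial-recognition/detect-faces", json={"image": "placeholder"})


class EncodeTextUser(HttpUser):
    wait_time = constant(0)

    @task
    def encode_text(self):
        self.client.post("/sentence-transformer/encode", json={"text": "placeholder"})

With equal (default) weights, Locust distributes the spawned users evenly across the user classes, which matches the comment in the script about each user being assigned a single task.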