/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, Andrew Kaster <akaster@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Function.h>
#include <LibCore/ArgsParser.h>
#include <LibTest/Macros.h>
#include <LibTest/TestResult.h>
#include <LibTest/TestSuite.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>

namespace Test {

TestSuite* TestSuite::s_global = nullptr;
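
// Small stopwatch built on gettimeofday(); reports elapsed wall-clock time in milliseconds.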
class TestElapsedTimer {
public:
    TestElapsedTimer() { restart(); }

    void restart() { gettimeofday(&m_started, nullptr); }

    u64 elapsed_milliseconds()
    {
        struct timeval now = {};
        gettimeofday(&now, nullptr);

        struct timeval delta = {};
        timersub(&now, &m_started, &delta);

        return delta.tv_sec * 1000 + delta.tv_usec / 1000;
    }

private:
    struct timeval m_started = {};
};
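
// Free functions that forward to the TestSuite singleton; their declarations
// live in Macros.h and TestCase.h.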

// Declared in Macros.h
TestResult current_test_result()
{
    return TestSuite::the().current_test_result();
}

// Declared in Macros.h
void set_current_test_result(TestResult result)
{
    TestSuite::the().set_current_test_result(result);
}

// Declared in Macros.h
void set_randomness_source(Randomized::RandomnessSource source)
{
    TestSuite::the().set_randomness_source(move(source));
}

// Declared in Macros.h
Randomized::RandomnessSource& randomness_source()
{
    return TestSuite::the().randomness_source();
}

// Declared in Macros.h
u64 randomized_runs()
{
    return TestSuite::the().randomized_runs();
}

// Declared in TestCase.h
void add_test_case_to_suite(NonnullRefPtr<TestCase> const& test_case)
{
    TestSuite::the().add_case(test_case);
}

// Declared in TestCase.h
void set_suite_setup_function(Function<void()> setup)
{
    TestSuite::the().set_suite_setup(move(setup));
}

// Declared in Macros.h
bool is_reporting_enabled()
{
    return TestSuite::the().is_reporting_enabled();
}

// Declared in Macros.h
void enable_reporting()
{
    TestSuite::the().enable_reporting();
}

// Declared in Macros.h
void disable_reporting()
{
    TestSuite::the().disable_reporting();
}

static ByteString test_result_to_string(TestResult result)
{
    switch (result) {
    case TestResult::NotRun:
        return "Not run";
    case TestResult::Passed:
        return "Completed";
    case TestResult::Failed:
        return "Failed";
    case TestResult::Rejected:
        return "Rejected";
    case TestResult::Overrun:
        return "Ran out of randomness";
    default:
        return "Unknown TestResult";
    }
}
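
// Parses the command-line arguments, runs the optional suite setup, and then
// either lists the matching cases or executes them.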
int TestSuite::main(ByteString const& suite_name, Span<StringView> arguments)
{
    m_suite_name = suite_name;

    Core::ArgsParser args_parser;

    bool do_tests_only = getenv("TESTS_ONLY") != nullptr;
    bool do_benchmarks_only = false;
    bool do_list_cases = false;
    StringView search_string = "*"sv;

    args_parser.add_option(do_tests_only, "Only run tests.", "tests", 0);
    args_parser.add_option(do_benchmarks_only, "Only run benchmarks.", "bench", 0);
    args_parser.add_option(m_benchmark_repetitions, "Number of times to repeat each benchmark (default 1)", "benchmark_repetitions", 0, "N");
    args_parser.add_option(m_randomized_runs, "Number of times to run each RANDOMIZED_TEST_CASE (default 100)", "randomized_runs", 0, "RUNS");
    args_parser.add_option(do_list_cases, "List available test cases.", "list", 0);
    args_parser.add_positional_argument(search_string, "Only run matching cases.", "pattern", Core::ArgsParser::Required::No);
    args_parser.parse(arguments);

    if (m_setup)
        m_setup();

    auto const& matching_tests = find_cases(search_string, !do_benchmarks_only, !do_tests_only);

    if (do_list_cases) {
        outln("Available cases for {}:", suite_name);
        for (auto const& test : matching_tests) {
            outln(" {}", test->name());
        }
        return 0;
    }

    outln("Running {} cases out of {}.", matching_tests.size(), m_cases.size());

    return run(matching_tests);
}
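
// Collects the cases whose names match the given pattern (case-insensitively),
// optionally filtering out tests or benchmarks.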
Vector<NonnullRefPtr<TestCase>> TestSuite::find_cases(ByteString const& search, bool find_tests, bool find_benchmarks)
{
    Vector<NonnullRefPtr<TestCase>> matches;
    for (auto& t : m_cases) {
        if (!search.is_empty() && !t->name().matches(search, CaseSensitivity::CaseInsensitive)) {
            continue;
        }

        if (!find_tests && !t->is_benchmark()) {
            continue;
        }

        if (!find_benchmarks && t->is_benchmark()) {
            continue;
        }

        matches.append(t);
    }
    return matches;
}
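
// Runs every given case, timing each one (benchmarks are repeated
// m_benchmark_repetitions times), and returns the number of cases that did not pass.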
int TestSuite::run(Vector<NonnullRefPtr<TestCase>> const& tests)
{
    size_t test_count = 0;
    size_t test_passed_count = 0;
    size_t test_failed_count = 0;
    size_t benchmark_count = 0;
    size_t benchmark_passed_count = 0;
    size_t benchmark_failed_count = 0;
    TestElapsedTimer global_timer;

    for (auto const& t : tests) {
        auto const test_type = t->is_benchmark() ? "benchmark" : "test";
        auto const repetitions = t->is_benchmark() ? m_benchmark_repetitions : 1;

        warnln("Running {} '{}'.", test_type, t->name());
        m_current_test_result = TestResult::NotRun;
        enable_reporting();

        u64 total_time = 0;
        u64 sum_of_squared_times = 0;
        u64 min_time = NumericLimits<u64>::max();
        u64 max_time = 0;

        for (u64 i = 0; i < repetitions; ++i) {
            TestElapsedTimer timer;
            t->func()();
            auto const iteration_time = timer.elapsed_milliseconds();
            total_time += iteration_time;
            sum_of_squared_times += iteration_time * iteration_time;
            min_time = min(min_time, iteration_time);
            max_time = max(max_time, iteration_time);

            // Non-randomized tests don't touch the test result when passing.
            if (m_current_test_result == TestResult::NotRun)
                m_current_test_result = TestResult::Passed;
        }
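
        // Sample standard deviation computed from the running sums:
        // sum((x - mean)^2) = sum(x^2) - 2 * mean * sum(x) + n * mean^2.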
        if (repetitions != 1) {
            double average = total_time / double(repetitions);
            double average_squared = average * average;
            double standard_deviation = sqrt((sum_of_squared_times + repetitions * average_squared - 2 * total_time * average) / (repetitions - 1));

            dbgln("{} {} '{}' on average in {:.1f}±{:.1f}ms (min={}ms, max={}ms, total={}ms)",
                test_result_to_string(m_current_test_result), test_type, t->name(),
                average, standard_deviation, min_time, max_time, total_time);
        } else {
            dbgln("{} {} '{}' in {}ms", test_result_to_string(m_current_test_result), test_type, t->name(), total_time);
        }

        if (t->is_benchmark()) {
            m_benchtime += total_time;
            benchmark_count++;

            switch (m_current_test_result) {
            case TestResult::Passed:
                benchmark_passed_count++;
                break;
            case TestResult::Failed:
                benchmark_failed_count++;
                break;
            default:
                break;
            }
        } else {
            m_testtime += total_time;
            test_count++;

            switch (m_current_test_result) {
            case TestResult::Passed:
                test_passed_count++;
                break;
            case TestResult::Failed:
                test_failed_count++;
                break;
            default:
                break;
            }
        }
    }
  213. dbgln("Finished {} tests and {} benchmarks in {}ms ({}ms tests, {}ms benchmarks, {}ms other).",
  214. test_count,
  215. benchmark_count,
  216. global_timer.elapsed_milliseconds(),
  217. m_testtime,
  218. m_benchtime,
  219. global_timer.elapsed_milliseconds() - (m_testtime + m_benchtime));
  220. if (test_count != 0) {
  221. if (test_passed_count == test_count) {
  222. dbgln("All {} tests passed.", test_count);
  223. } else if (test_passed_count + test_failed_count == test_count) {
  224. dbgln("Out of {} tests, {} passed and {} failed.", test_count, test_passed_count, test_failed_count);
  225. } else {
  226. dbgln("Out of {} tests, {} passed, {} failed and {} didn't finish for other reasons.", test_count, test_passed_count, test_failed_count, test_count - test_passed_count - test_failed_count);
  227. }
  228. }
  229. if (benchmark_count != 0) {
  230. if (benchmark_passed_count == benchmark_count) {
  231. dbgln("All {} benchmarks passed.", benchmark_count);
  232. } else if (benchmark_passed_count + benchmark_failed_count == benchmark_count) {
  233. dbgln("Out of {} benchmarks, {} passed and {} failed.", benchmark_count, benchmark_passed_count, benchmark_failed_count);
  234. } else {
  235. dbgln("Out of {} benchmarks, {} passed, {} failed and {} didn't finish for other reasons.", benchmark_count, benchmark_passed_count, benchmark_failed_count, benchmark_count - benchmark_passed_count - benchmark_failed_count);
  236. }
  237. }
  238. // We have multiple TestResults, all except for Passed being "bad".
  239. // Let's get a count of them:
  240. return (int)(test_count - test_passed_count + benchmark_count - benchmark_passed_count);
  241. }
  242. } // namespace Test