/*
 * Copyright (c) 2018-2020, Andreas Kling <andreas@ladybird.org>
 * Copyright (c) 2021, Andrew Kaster <akaster@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#include <AK/Function.h>
#include <LibCore/ArgsParser.h>
#include <LibTest/Macros.h>
#include <LibTest/TestResult.h>
#include <LibTest/TestSuite.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
namespace Test {
// Backing storage for the process-wide TestSuite singleton (see TestSuite::the()).
TestSuite* TestSuite::s_global = nullptr;
// Small stopwatch built on gettimeofday(); used to time individual test
// cases and the whole run. Millisecond resolution.
class TestElapsedTimer {
public:
    TestElapsedTimer() { restart(); }

    // Re-arm the stopwatch so elapsed_milliseconds() measures from now.
    void restart() { gettimeofday(&m_started, nullptr); }

    // Milliseconds elapsed since construction or the last restart().
    u64 elapsed_milliseconds()
    {
        struct timeval now = {};
        gettimeofday(&now, nullptr);

        struct timeval delta = {};
        timersub(&now, &m_started, &delta);

        return delta.tv_sec * 1000 + delta.tv_usec / 1000;
    }

private:
    struct timeval m_started = {};
};
// Declared in Macros.h
2023-10-23 22:19:04 +00:00
TestResult current_test_result ( )
2021-04-25 05:53:23 +00:00
{
2023-10-23 22:19:04 +00:00
return TestSuite : : the ( ) . current_test_result ( ) ;
}
// Declared in Macros.h
// Records the outcome of the test case currently executing.
void set_current_test_result(TestResult result)
{
    auto& suite = TestSuite::the();
    suite.set_current_test_result(result);
}
2023-10-23 22:35:15 +00:00
// Declared in Macros.h
void set_randomness_source ( Randomized : : RandomnessSource source )
{
TestSuite : : the ( ) . set_randomness_source ( move ( source ) ) ;
}
// Declared in Macros.h
// Accessor for the suite's current randomness source.
Randomized::RandomnessSource& randomness_source()
{
    auto& suite = TestSuite::the();
    return suite.randomness_source();
}
2023-10-27 09:47:43 +00:00
// Declared in Macros.h
u64 randomized_runs ( )
{
return TestSuite : : the ( ) . randomized_runs ( ) ;
}
2021-04-25 05:53:23 +00:00
// Declared in TestCase.h
2022-04-01 17:58:27 +00:00
void add_test_case_to_suite ( NonnullRefPtr < TestCase > const & test_case )
2021-04-25 05:53:23 +00:00
{
TestSuite : : the ( ) . add_case ( test_case ) ;
}
2021-08-29 18:55:05 +00:00
// Declared in TestCase.h
void set_suite_setup_function ( Function < void ( ) > setup )
{
TestSuite : : the ( ) . set_suite_setup ( move ( setup ) ) ;
}
2023-10-23 23:26:28 +00:00
// Declared in Macros.h
bool is_reporting_enabled ( )
{
return TestSuite : : the ( ) . is_reporting_enabled ( ) ;
}
// Declared in Macros.h
void enable_reporting ( )
{
TestSuite : : the ( ) . enable_reporting ( ) ;
}
// Declared in Macros.h
void disable_reporting ( )
{
TestSuite : : the ( ) . disable_reporting ( ) ;
}
2023-12-16 14:19:34 +00:00
static ByteString test_result_to_string ( TestResult result )
2023-10-23 22:19:04 +00:00
{
switch ( result ) {
case TestResult : : NotRun :
return " Not run " ;
case TestResult : : Passed :
return " Completed " ;
case TestResult : : Failed :
return " Failed " ;
2023-10-23 23:34:46 +00:00
case TestResult : : Rejected :
return " Rejected " ;
2023-10-23 22:35:15 +00:00
case TestResult : : Overrun :
return " Ran out of randomness " ;
2023-10-23 22:19:04 +00:00
default :
return " Unknown TestResult " ;
}
}
2023-12-16 14:19:34 +00:00
int TestSuite : : main ( ByteString const & suite_name , Span < StringView > arguments )
2021-04-25 05:53:23 +00:00
{
m_suite_name = suite_name ;
Core : : ArgsParser args_parser ;
bool do_tests_only = getenv ( " TESTS_ONLY " ) ! = nullptr ;
bool do_benchmarks_only = false ;
bool do_list_cases = false ;
2023-02-28 20:41:43 +00:00
StringView search_string = " * " sv ;
2021-04-25 05:53:23 +00:00
2024-04-20 20:34:56 +00:00
args_parser . add_option ( do_tests_only , " Only run tests. " , " tests " ) ;
args_parser . add_option ( do_benchmarks_only , " Only run benchmarks. " , " bench " ) ;
2023-03-08 22:04:59 +00:00
args_parser . add_option ( m_benchmark_repetitions , " Number of times to repeat each benchmark (default 1) " , " benchmark_repetitions " , 0 , " N " ) ;
2023-10-27 09:47:43 +00:00
args_parser . add_option ( m_randomized_runs , " Number of times to run each RANDOMIZED_TEST_CASE (default 100) " , " randomized_runs " , 0 , " RUNS " ) ;
2024-04-20 20:34:56 +00:00
args_parser . add_option ( do_list_cases , " List available test cases. " , " list " ) ;
2021-04-25 05:53:23 +00:00
args_parser . add_positional_argument ( search_string , " Only run matching cases. " , " pattern " , Core : : ArgsParser : : Required : : No ) ;
2023-02-21 11:44:41 +00:00
args_parser . parse ( arguments ) ;
2021-04-25 05:53:23 +00:00
2021-08-29 18:55:05 +00:00
if ( m_setup )
m_setup ( ) ;
2022-04-01 17:58:27 +00:00
auto const & matching_tests = find_cases ( search_string , ! do_benchmarks_only , ! do_tests_only ) ;
2021-04-25 05:53:23 +00:00
if ( do_list_cases ) {
outln ( " Available cases for {}: " , suite_name ) ;
2022-04-01 17:58:27 +00:00
for ( auto const & test : matching_tests ) {
2023-03-06 13:17:01 +00:00
outln ( " {} " , test - > name ( ) ) ;
2021-04-25 05:53:23 +00:00
}
return 0 ;
}
outln ( " Running {} cases out of {}. " , matching_tests . size ( ) , m_cases . size ( ) ) ;
return run ( matching_tests ) ;
}
2023-12-16 14:19:34 +00:00
Vector < NonnullRefPtr < TestCase > > TestSuite : : find_cases ( ByteString const & search , bool find_tests , bool find_benchmarks )
2021-04-25 05:53:23 +00:00
{
2023-03-06 13:17:01 +00:00
Vector < NonnullRefPtr < TestCase > > matches ;
2023-02-19 22:31:59 +00:00
for ( auto & t : m_cases ) {
2023-03-06 13:17:01 +00:00
if ( ! search . is_empty ( ) & & ! t - > name ( ) . matches ( search , CaseSensitivity : : CaseInsensitive ) ) {
2021-04-25 05:53:23 +00:00
continue ;
}
2023-03-06 13:17:01 +00:00
if ( ! find_tests & & ! t - > is_benchmark ( ) ) {
2021-04-25 05:53:23 +00:00
continue ;
}
2023-03-06 13:17:01 +00:00
if ( ! find_benchmarks & & t - > is_benchmark ( ) ) {
2021-04-25 05:53:23 +00:00
continue ;
}
matches . append ( t ) ;
}
return matches ;
}
2023-03-06 13:17:01 +00:00
int TestSuite : : run ( Vector < NonnullRefPtr < TestCase > > const & tests )
2021-04-25 05:53:23 +00:00
{
size_t test_count = 0 ;
2023-10-23 22:19:04 +00:00
size_t test_passed_count = 0 ;
2021-04-25 05:53:23 +00:00
size_t test_failed_count = 0 ;
size_t benchmark_count = 0 ;
2023-10-23 22:19:04 +00:00
size_t benchmark_passed_count = 0 ;
size_t benchmark_failed_count = 0 ;
2021-04-25 05:53:23 +00:00
TestElapsedTimer global_timer ;
2022-04-01 17:58:27 +00:00
for ( auto const & t : tests ) {
2023-03-06 13:17:01 +00:00
auto const test_type = t - > is_benchmark ( ) ? " benchmark " : " test " ;
2023-03-08 22:04:59 +00:00
auto const repetitions = t - > is_benchmark ( ) ? m_benchmark_repetitions : 1 ;
2021-04-25 05:53:23 +00:00
2023-03-06 13:17:01 +00:00
warnln ( " Running {} '{}'. " , test_type , t - > name ( ) ) ;
2023-10-23 22:19:04 +00:00
m_current_test_result = TestResult : : NotRun ;
2023-10-23 23:26:28 +00:00
enable_reporting ( ) ;
2021-04-25 05:53:23 +00:00
2023-03-08 22:04:59 +00:00
u64 total_time = 0 ;
u64 sum_of_squared_times = 0 ;
u64 min_time = NumericLimits < u64 > : : max ( ) ;
u64 max_time = 0 ;
for ( u64 i = 0 ; i < repetitions ; + + i ) {
TestElapsedTimer timer ;
t - > func ( ) ( ) ;
auto const iteration_time = timer . elapsed_milliseconds ( ) ;
total_time + = iteration_time ;
sum_of_squared_times + = iteration_time * iteration_time ;
min_time = min ( min_time , iteration_time ) ;
max_time = max ( max_time , iteration_time ) ;
2023-10-23 22:19:04 +00:00
// Non-randomized tests don't touch the test result when passing.
if ( m_current_test_result = = TestResult : : NotRun )
m_current_test_result = TestResult : : Passed ;
2023-03-08 22:04:59 +00:00
}
if ( repetitions ! = 1 ) {
double average = total_time / double ( repetitions ) ;
double average_squared = average * average ;
double standard_deviation = sqrt ( ( sum_of_squared_times + repetitions * average_squared - 2 * total_time * average ) / ( repetitions - 1 ) ) ;
2021-04-25 05:53:23 +00:00
2023-03-08 22:04:59 +00:00
dbgln ( " {} {} '{}' on average in {:.1f}±{:.1f}ms (min={}ms, max={}ms, total={}ms) " ,
2023-10-23 22:19:04 +00:00
test_result_to_string ( m_current_test_result ) , test_type , t - > name ( ) ,
2023-03-08 22:04:59 +00:00
average , standard_deviation , min_time , max_time , total_time ) ;
} else {
2023-10-23 22:19:04 +00:00
dbgln ( " {} {} '{}' in {}ms " , test_result_to_string ( m_current_test_result ) , test_type , t - > name ( ) , total_time ) ;
2023-03-08 22:04:59 +00:00
}
2021-04-25 05:53:23 +00:00
2023-03-06 13:17:01 +00:00
if ( t - > is_benchmark ( ) ) {
2023-03-08 22:04:59 +00:00
m_benchtime + = total_time ;
2021-04-25 05:53:23 +00:00
benchmark_count + + ;
2023-10-23 22:19:04 +00:00
switch ( m_current_test_result ) {
case TestResult : : Passed :
benchmark_passed_count + + ;
break ;
case TestResult : : Failed :
benchmark_failed_count + + ;
break ;
default :
break ;
}
2021-04-25 05:53:23 +00:00
} else {
2023-03-08 22:04:59 +00:00
m_testtime + = total_time ;
2021-04-25 05:53:23 +00:00
test_count + + ;
2023-10-23 22:19:04 +00:00
switch ( m_current_test_result ) {
case TestResult : : Passed :
test_passed_count + + ;
break ;
case TestResult : : Failed :
test_failed_count + + ;
break ;
default :
break ;
}
2021-04-25 05:53:23 +00:00
}
}
dbgln ( " Finished {} tests and {} benchmarks in {}ms ({}ms tests, {}ms benchmarks, {}ms other). " ,
test_count ,
benchmark_count ,
global_timer . elapsed_milliseconds ( ) ,
m_testtime ,
m_benchtime ,
global_timer . elapsed_milliseconds ( ) - ( m_testtime + m_benchtime ) ) ;
2023-03-28 14:57:59 +00:00
2023-10-23 22:19:04 +00:00
if ( test_count ! = 0 ) {
if ( test_passed_count = = test_count ) {
dbgln ( " All {} tests passed. " , test_count ) ;
} else if ( test_passed_count + test_failed_count = = test_count ) {
dbgln ( " Out of {} tests, {} passed and {} failed. " , test_count , test_passed_count , test_failed_count ) ;
} else {
dbgln ( " Out of {} tests, {} passed, {} failed and {} didn't finish for other reasons. " , test_count , test_passed_count , test_failed_count , test_count - test_passed_count - test_failed_count ) ;
}
}
2021-04-25 05:53:23 +00:00
2023-10-23 22:19:04 +00:00
if ( benchmark_count ! = 0 ) {
if ( benchmark_passed_count = = benchmark_count ) {
dbgln ( " All {} benchmarks passed. " , benchmark_count ) ;
} else if ( benchmark_passed_count + benchmark_failed_count = = benchmark_count ) {
dbgln ( " Out of {} benchmarks, {} passed and {} failed. " , benchmark_count , benchmark_passed_count , benchmark_failed_count ) ;
} else {
dbgln ( " Out of {} benchmarks, {} passed, {} failed and {} didn't finish for other reasons. " , benchmark_count , benchmark_passed_count , benchmark_failed_count , benchmark_count - benchmark_passed_count - benchmark_failed_count ) ;
}
}
2021-04-25 05:53:23 +00:00
2023-10-23 22:19:04 +00:00
// We have multiple TestResults, all except for Passed being "bad".
// Let's get a count of them:
return ( int ) ( test_count - test_passed_count + benchmark_count - benchmark_passed_count ) ;
2021-04-25 05:53:23 +00:00
}
} // namespace Test