From 4a02a7f508be7a36aae39304b044582cd6d455d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn=20Nieto?= Date: Mon, 5 Oct 2015 12:23:27 +0200 Subject: [PATCH 1/3] Add simple benchmarking Starting a function name with 'bench_' will now also store it and make clar run it, but only when invoked with the '-b' flag. In order to reduce how much clar overhead we measure, we put the timing code around the function call, which means we end up timing every function, but we only store the elapsed time in benchmark mode. The overhead from calling the timer is minimal, so it shouldn't present an issue to add these calls for every test. This is not meant for micro-benchmarks, but for situations where we expect the operations to take at least tens or hundreds of milliseconds and we expect the operations not to be easily mapped into the "repeat N times" method to suss out the noise, so we don't include this functionality. This is for "perform very expensive operation" to track high-scale changes though time. Thus we also provide cl_reset_timer() to allow the function under test to perform specific setup steps that it does not want to count towards the timing. 
--- clar.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++ clar.h | 7 ++++ clar/print.h | 6 ++++ clar/time.h | 71 +++++++++++++++++++++++++++++++++++++++++ generate.py | 33 +++++++++++-------- test/clar_test.h | 1 + test/main.c | 6 +++- test/sample.c | 36 +++++++++++++++++++++ 8 files changed, 229 insertions(+), 14 deletions(-) create mode 100644 clar/time.h diff --git a/clar.c b/clar.c index 2caa871..bff920d 100644 --- a/clar.c +++ b/clar.c @@ -106,6 +106,15 @@ struct clar_error { struct clar_error *next; }; +struct clar_timing { + const char *test; + const char *suite; + + double elapsed; + + struct clar_timing *next; +}; + static struct { int argc; char **argv; @@ -124,9 +133,16 @@ static struct { int exit_on_error; int report_suite_names; + int run_benchmarks; + double timing_start; + double timing_end; + struct clar_error *errors; struct clar_error *last_error; + struct clar_timing *timings; + struct clar_timing *last_timing; + void (*local_cleanup)(void *); void *local_cleanup_payload; @@ -141,6 +157,7 @@ static struct { struct clar_func { const char *name; void (*ptr)(void); + int is_bench; }; struct clar_suite { @@ -156,6 +173,7 @@ struct clar_suite { static void clar_print_init(int test_count, int suite_count, const char *suite_names); static void clar_print_shutdown(int test_count, int suite_count, int error_count); static void clar_print_error(int num, const struct clar_error *error); +static void clar_print_timing(const struct clar_timing *timing); static void clar_print_ontest(const char *test_name, int test_number, enum cl_test_status failed); static void clar_print_onsuite(const char *suite_name, int suite_index); static void clar_print_onabort(const char *msg, ...); @@ -164,6 +182,12 @@ static void clar_print_onabort(const char *msg, ...); static void clar_unsandbox(void); static int clar_sandbox(void); +/* From time.h */ +/** + * Return the time from a monotonic timer. 
+ */ +static double clar_timer(void); + /* Load the declarations for the test suite */ #include "clar.suite" @@ -203,6 +227,23 @@ clar_report_errors(void) _clar.errors = _clar.last_error = NULL; } +static void +clar_report_timings(void) +{ + struct clar_timing *timing, *next; + + timing = _clar.timings; + + while (timing != NULL) { + next = timing->next; + clar_print_timing(timing); + free(timing); + timing = next; + } + + _clar.timings = _clar.last_timing = NULL; +} + static void clar_run_test( const struct clar_func *test, @@ -219,7 +260,9 @@ clar_run_test( initialize->ptr(); CL_TRACE(CL_TRACE__TEST__RUN_BEGIN); + _clar.timing_start = clar_timer(); test->ptr(); + _clar.timing_end = clar_timer(); CL_TRACE(CL_TRACE__TEST__RUN_END); } @@ -246,6 +289,8 @@ clar_run_test( } } +static void clar_store_timing(void); + static void clar_run_suite(const struct clar_suite *suite, const char *filter) { @@ -282,13 +327,23 @@ clar_run_suite(const struct clar_suite *suite, const char *filter) if (filter && strncmp(test[i].name, filter, matchlen)) continue; + if (test[i].is_bench != _clar.run_benchmarks) + continue; + _clar.active_test = test[i].name; clar_run_test(&test[i], &suite->initialize, &suite->cleanup); if (_clar.exit_on_error && _clar.total_errors) return; + + if (test[i].is_bench) { + clar_store_timing(); + } } + puts(""); + clar_report_timings(); + _clar.active_test = NULL; CL_TRACE(CL_TRACE__SUITE_END); } @@ -305,6 +360,7 @@ clar_usage(const char *arg) printf(" -q \tOnly report tests that had an error\n"); printf(" -Q \tQuit as soon as a test fails\n"); printf(" -l \tPrint suite names\n"); + printf(" -b \tRun benchmarks instead of tests\n"); exit(-1); } @@ -363,6 +419,10 @@ clar_parse_args(int argc, char **argv) break; } + case 'b': + _clar.run_benchmarks = 1; + break; + case 'q': _clar.report_errors_only = 1; break; @@ -447,6 +507,23 @@ clar_test(int argc, char **argv) return errors; } +static void clar_store_timing(void) +{ + struct clar_timing *timing = calloc(1, 
sizeof(struct clar_timing)); + + if (_clar.timings == NULL) + _clar.timings = timing; + + if (_clar.last_timing != NULL) + _clar.last_timing->next = timing; + + _clar.last_timing = timing; + + timing->elapsed = _clar.timing_end - _clar.timing_start; + timing->test = _clar.active_test; + timing->suite = _clar.active_suite; +} + static void abort_test(void) { if (!_clar.trampoline_enabled) { @@ -460,6 +537,11 @@ static void abort_test(void) longjmp(_clar.trampoline, -1); } +void clar__reset_timer(void) +{ + _clar.timing_start = clar_timer(); +} + void clar__skip(void) { _clar.test_status = CL_TEST_SKIP; @@ -640,3 +722,4 @@ void cl_set_cleanup(void (*cleanup)(void *), void *opaque) #include "clar/fixtures.h" #include "clar/fs.h" #include "clar/print.h" +#include "clar/time.h" diff --git a/clar.h b/clar.h index 5c674d7..2195a02 100644 --- a/clar.h +++ b/clar.h @@ -110,6 +110,11 @@ void cl_fixture_cleanup(const char *fixture_name); #define cl_skip() clar__skip() +/** + * Timer-related functions + */ +#define cl_reset_timer() clar__reset_timer() + /** * Typed assertion macros */ @@ -133,6 +138,8 @@ void cl_fixture_cleanup(const char *fixture_name); #define cl_assert_equal_p(p1,p2) clar__assert_equal(__FILE__,__LINE__,"Pointer mismatch: " #p1 " != " #p2, 1, "%p", (p1), (p2)) +void clar__reset_timer(void); + void clar__skip(void); void clar__fail( diff --git a/clar/print.h b/clar/print.h index 6529b6b..9fce4d9 100644 --- a/clar/print.h +++ b/clar/print.h @@ -35,6 +35,12 @@ static void clar_print_error(int num, const struct clar_error *error) fflush(stdout); } +static void clar_print_timing(const struct clar_timing *timing) +{ + printf("Benchmark %s::%s %f\n", timing->suite, timing->test, timing->elapsed); + fflush(stdout); +} + static void clar_print_ontest(const char *test_name, int test_number, enum cl_test_status status) { (void)test_name; diff --git a/clar/time.h b/clar/time.h new file mode 100644 index 0000000..9f1ce2c --- /dev/null +++ b/clar/time.h @@ -0,0 +1,71 @@ 
+#ifdef GIT_WIN32 + +double clar_timer(void) +{ + /* We need the initial tick count to detect if the tick + * count has rolled over. */ + static DWORD initial_tick_count = 0; + + /* GetTickCount returns the number of milliseconds that have + * elapsed since the system was started. */ + DWORD count = GetTickCount(); + + if(initial_tick_count == 0) { + initial_tick_count = count; + } else if (count < initial_tick_count) { + /* The tick count has rolled over - adjust for it. */ + count = (0xFFFFFFFF - initial_tick_count) + count; + } + + return ((double)count / (double)1000); +} + +#elif __APPLE__ + +#include <mach/mach_time.h> + +double clar_timer(void) +{ + uint64_t time = mach_absolute_time(); + static double scaling_factor = 0; + + if (scaling_factor == 0) { + mach_timebase_info_data_t info; + (void)mach_timebase_info(&info); + scaling_factor = (double)info.numer / (double)info.denom; + } + + return (double)time * scaling_factor / 1.0E9; +} + +#elif defined(AMIGA) + +#include <proto/timer.h> + +double clar_timer(void) +{ + struct TimeVal tv; + ITimer->GetUpTime(&tv); + return ((double)tv.Seconds + ((double)tv.Microseconds / 1.0E6)); +} + +#else + +#include <sys/time.h> + +double clar_timer(void) +{ + struct timespec tp; + + if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) { + return ((double)tp.tv_sec + ((double)tp.tv_nsec / 1.0E9)); + } else { + /* Fall back to using gettimeofday */ + struct timeval tv; + struct timezone tz; + gettimeofday(&tv, &tz); + return ((double)tv.tv_sec + ((double)tv.tv_usec / 1.0E6)); + } +} + +#endif diff --git a/generate.py b/generate.py index 587efb5..0c0f7f8 100755 --- a/generate.py +++ b/generate.py @@ -18,7 +18,7 @@ def __init__(self, module): def _render_callback(self, cb): if not cb: return ' { NULL, NULL }' - return ' { "%s", &%s }' % (cb['short_name'], cb['symbol']) + return ' { "%s", &%s, %d }' % (cb['short_name'], cb['symbol'], cb['bench']) class DeclarationTemplate(Template): def render(self): @@ -79,21 +79,13 @@ def _replacer(match): return re.sub(SKIP_COMMENTS_REGEX, _replacer,
text) - def parse(self, contents): - TEST_FUNC_REGEX = r"^(void\s+(test_%s__(\w+))\s*\(\s*void\s*\))\s*\{" - - contents = self._skip_comments(contents) - regex = re.compile(TEST_FUNC_REGEX % self.name, re.MULTILINE) - - self.callbacks = [] - self.initialize = None - self.cleanup = None - - for (declaration, symbol, short_name) in regex.findall(contents): + def _append_callbacks(self, callbacks, is_bench): + for (declaration, symbol, short_name) in callbacks: data = { "short_name" : short_name, "declaration" : declaration, - "symbol" : symbol + "symbol" : symbol, + "bench" : is_bench, } if short_name == 'initialize': @@ -103,6 +95,21 @@ def parse(self, contents): else: self.callbacks.append(data) + def parse(self, contents): + TEST_FUNC_REGEX = r"^(void\s+(test_%s__(\w+))\s*\(\s*void\s*\))\s*\{" + BENCH_FUNC_REGEX = r"^(void\s+(bench_%s__(\w+))\s*\(\s*void\s*\))\s*\{" + + contents = self._skip_comments(contents) + test_regex = re.compile(TEST_FUNC_REGEX % self.name, re.MULTILINE) + bench_regex = re.compile(BENCH_FUNC_REGEX % self.name, re.MULTILINE) + + self.callbacks = [] + self.initialize = None + self.cleanup = None + + self._append_callbacks(test_regex.findall(contents), False) + self._append_callbacks(bench_regex.findall(contents), True) + return self.callbacks != [] def refresh(self, path): diff --git a/test/clar_test.h b/test/clar_test.h index 0fcaa63..c713769 100644 --- a/test/clar_test.h +++ b/test/clar_test.h @@ -12,5 +12,6 @@ /* Your custom shared includes / defines here */ extern int global_test_counter; +extern int global_is_bench; #endif diff --git a/test/main.c b/test/main.c index 59e56ad..a95e16a 100644 --- a/test/main.c +++ b/test/main.c @@ -18,6 +18,7 @@ */ int global_test_counter = 0; +int global_is_bench = 0; #ifdef _WIN32 int __cdecl main(int argc, char *argv[]) @@ -34,7 +35,10 @@ int main(int argc, char *argv[]) ret = clar_test(argc, argv); /* Your custom cleanup here */ - cl_assert_equal_i(8, global_test_counter); + if (global_is_bench) + 
cl_assert_equal_i(3, global_test_counter); + else + cl_assert_equal_i(8, global_test_counter); return ret; } diff --git a/test/sample.c b/test/sample.c index faa1209..c509b60 100644 --- a/test/sample.c +++ b/test/sample.c @@ -82,3 +82,39 @@ void test_sample__ptr(void) cl_assert_equal_p(actual, actual); /* pointers to same object */ cl_assert_equal_p(&actual, actual); } + +void bench_sample__loop(void) +{ + int i; + + global_is_bench = 1; + + for (i = 0; i < 1000000; i++) { + } +} + +void bench_sample__loop2(void) +{ + int i; + + global_is_bench = 1; + + for (i = 0; i < 1000000; i++) { + int dummy = i*1000; + dummy = dummy; + } +} + +void bench_sample__reset_timer(void) +{ + int i; + + global_is_bench = 1; + + for (i = 0; i < 100000000; i++) { + int dummy = i*1000; + dummy = dummy; + } + + cl_reset_timer(); +} From 5e925f4edb19bf0b90618c9ee5bb9267ef9f16ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn=20Nieto?= Date: Wed, 4 Nov 2015 08:58:56 -0800 Subject: [PATCH 2/3] Report timings for all tests Instead of separating the benchmark tests from "normal" tests, store timings for all successful tests and let the user decide which tests they'd like to run via the -s option. 
--- clar.c | 30 +++++++++++++++++------------- generate.py | 33 +++++++++++++-------------------- test/main.c | 6 +----- test/sample.c | 12 +++--------- 4 files changed, 34 insertions(+), 47 deletions(-) diff --git a/clar.c b/clar.c index bff920d..3057cb3 100644 --- a/clar.c +++ b/clar.c @@ -133,7 +133,7 @@ static struct { int exit_on_error; int report_suite_names; - int run_benchmarks; + int report_benchmarks; double timing_start; double timing_end; @@ -157,7 +157,6 @@ static struct { struct clar_func { const char *name; void (*ptr)(void); - int is_bench; }; struct clar_suite { @@ -251,6 +250,8 @@ clar_run_test( const struct clar_func *cleanup) { _clar.test_status = CL_TEST_OK; + _clar.timing_start = 0.0; + _clar.timing_end = 0.0; _clar.trampoline_enabled = 1; CL_TRACE(CL_TRACE__TEST__BEGIN); @@ -327,22 +328,19 @@ clar_run_suite(const struct clar_suite *suite, const char *filter) if (filter && strncmp(test[i].name, filter, matchlen)) continue; - if (test[i].is_bench != _clar.run_benchmarks) - continue; - _clar.active_test = test[i].name; clar_run_test(&test[i], &suite->initialize, &suite->cleanup); if (_clar.exit_on_error && _clar.total_errors) return; - if (test[i].is_bench) { - clar_store_timing(); - } + clar_store_timing(); } - puts(""); - clar_report_timings(); + if (_clar.report_benchmarks) { + puts(""); + clar_report_timings(); + } _clar.active_test = NULL; CL_TRACE(CL_TRACE__SUITE_END); @@ -360,7 +358,7 @@ clar_usage(const char *arg) printf(" -q \tOnly report tests that had an error\n"); printf(" -Q \tQuit as soon as a test fails\n"); printf(" -l \tPrint suite names\n"); - printf(" -b \tRun benchmarks instead of tests\n"); + printf(" -b \tReport test benchmarks\n"); exit(-1); } @@ -420,7 +418,7 @@ clar_parse_args(int argc, char **argv) } case 'b': - _clar.run_benchmarks = 1; + _clar.report_benchmarks = 1; break; case 'q': @@ -509,7 +507,13 @@ clar_test(int argc, char **argv) static void clar_store_timing(void) { - struct clar_timing *timing = calloc(1, 
sizeof(struct clar_timing)); + struct clar_timing *timing; + + /* Failed tests jump over the timing code */ + if (_clar.timing_end == 0) + return; + + timing = calloc(1, sizeof(struct clar_timing)); if (_clar.timings == NULL) _clar.timings = timing; diff --git a/generate.py b/generate.py index 0c0f7f8..587efb5 100755 --- a/generate.py +++ b/generate.py @@ -18,7 +18,7 @@ def __init__(self, module): def _render_callback(self, cb): if not cb: return ' { NULL, NULL }' - return ' { "%s", &%s, %d }' % (cb['short_name'], cb['symbol'], cb['bench']) + return ' { "%s", &%s }' % (cb['short_name'], cb['symbol']) class DeclarationTemplate(Template): def render(self): @@ -79,13 +79,21 @@ def _replacer(match): return re.sub(SKIP_COMMENTS_REGEX, _replacer, text) - def _append_callbacks(self, callbacks, is_bench): - for (declaration, symbol, short_name) in callbacks: + def parse(self, contents): + TEST_FUNC_REGEX = r"^(void\s+(test_%s__(\w+))\s*\(\s*void\s*\))\s*\{" + + contents = self._skip_comments(contents) + regex = re.compile(TEST_FUNC_REGEX % self.name, re.MULTILINE) + + self.callbacks = [] + self.initialize = None + self.cleanup = None + + for (declaration, symbol, short_name) in regex.findall(contents): data = { "short_name" : short_name, "declaration" : declaration, - "symbol" : symbol, - "bench" : is_bench, + "symbol" : symbol } if short_name == 'initialize': @@ -95,21 +103,6 @@ def _append_callbacks(self, callbacks, is_bench): else: self.callbacks.append(data) - def parse(self, contents): - TEST_FUNC_REGEX = r"^(void\s+(test_%s__(\w+))\s*\(\s*void\s*\))\s*\{" - BENCH_FUNC_REGEX = r"^(void\s+(bench_%s__(\w+))\s*\(\s*void\s*\))\s*\{" - - contents = self._skip_comments(contents) - test_regex = re.compile(TEST_FUNC_REGEX % self.name, re.MULTILINE) - bench_regex = re.compile(BENCH_FUNC_REGEX % self.name, re.MULTILINE) - - self.callbacks = [] - self.initialize = None - self.cleanup = None - - self._append_callbacks(test_regex.findall(contents), False) - 
self._append_callbacks(bench_regex.findall(contents), True) - return self.callbacks != [] def refresh(self, path): diff --git a/test/main.c b/test/main.c index a95e16a..2721c85 100644 --- a/test/main.c +++ b/test/main.c @@ -18,7 +18,6 @@ */ int global_test_counter = 0; -int global_is_bench = 0; #ifdef _WIN32 int __cdecl main(int argc, char *argv[]) @@ -35,10 +34,7 @@ int main(int argc, char *argv[]) ret = clar_test(argc, argv); /* Your custom cleanup here */ - if (global_is_bench) - cl_assert_equal_i(3, global_test_counter); - else - cl_assert_equal_i(8, global_test_counter); + cl_assert_equal_i(11, global_test_counter); return ret; } diff --git a/test/sample.c b/test/sample.c index c509b60..cf8163a 100644 --- a/test/sample.c +++ b/test/sample.c @@ -83,34 +83,28 @@ void test_sample__ptr(void) cl_assert_equal_p(&actual, actual); } -void bench_sample__loop(void) +void test_sample__bench_loop(void) { int i; - global_is_bench = 1; - for (i = 0; i < 1000000; i++) { } } -void bench_sample__loop2(void) +void test_sample__bench_loop2(void) { int i; - global_is_bench = 1; - for (i = 0; i < 1000000; i++) { int dummy = i*1000; dummy = dummy; } } -void bench_sample__reset_timer(void) +void test_sample__bench_reset_timer(void) { int i; - global_is_bench = 1; - for (i = 0; i < 100000000; i++) { int dummy = i*1000; dummy = dummy; From 7ea744476238fd77ee19b5b6a3a3597393796055 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn=20Nieto?= Date: Tue, 22 Dec 2015 13:58:33 +0000 Subject: [PATCH 3/3] Show timing results at the end Instead of intermixing benchmark results with the suite outputs, keep them all until the end. 
--- clar.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/clar.c b/clar.c index 3057cb3..2d1c785 100644 --- a/clar.c +++ b/clar.c @@ -240,7 +240,6 @@ clar_report_timings(void) timing = next; } - _clar.timings = _clar.last_timing = NULL; } static void @@ -337,11 +336,6 @@ clar_run_suite(const struct clar_suite *suite, const char *filter) clar_store_timing(); } - if (_clar.report_benchmarks) { - puts(""); - clar_report_timings(); - } - _clar.active_test = NULL; CL_TRACE(CL_TRACE__SUITE_END); } @@ -410,6 +404,11 @@ clar_parse_args(int argc, char **argv) } } + if (_clar.report_benchmarks) { + puts(""); + clar_report_timings(); + } + if (!found) { clar_print_onabort("No suite matching '%s' found.\n", argument); exit(-1); @@ -476,6 +475,11 @@ clar_test_run() size_t i; for (i = 0; i < _clar_suite_count; ++i) clar_run_suite(&_clar_suites[i], NULL); + + if (_clar.report_benchmarks) { + puts(""); + clar_report_timings(); + } } return _clar.total_errors;