Index: MicroBenchmarks/XRay/CMakeLists.txt
===================================================================
--- MicroBenchmarks/XRay/CMakeLists.txt
+++ MicroBenchmarks/XRay/CMakeLists.txt
@@ -4,20 +4,7 @@
   list(APPEND CPPFLAGS -std=c++11 -Wl,--gc-sections -fxray-instrument)
   list(APPEND LDFLAGS -fxray-instrument)
-  llvm_test_run(--benchmark_filter=dummy_skip_ignore)
+  llvm_test_run(--benchmark_repetitions=10)
   llvm_test_executable(retref-bench retref-bench.cc)
   target_link_libraries(retref-bench benchmark)
-
-  file(COPY retref-bench_BM_ReturnNeverInstrumented.test
-    DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedUnPatched.test
-    DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test
-    DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatched.test
-    DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_RDTSCP_Cost.test
-    DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test
-    DESTINATION ${CMAKE_CURRENT_BINARY_DIR})

 endif()
Index: litsupport/modules/microbenchmark.py
===================================================================
--- litsupport/modules/microbenchmark.py
+++ litsupport/modules/microbenchmark.py
@@ -8,6 +8,7 @@
 def _mutateCommandLine(context, commandline):
     cmd = shellcommand.parse(commandline)
     cmd.arguments.append("--benchmark_format=csv")
+    cmd.arguments.append("--benchmark_report_aggregates_only=true")
     # We need stdout outself to get the benchmark csv data.
     if cmd.stdout is not None:
         raise Exception("Rerouting stdout not allowed for microbenchmarks")
@@ -23,7 +24,8 @@


 def _collectMicrobenchmarkTime(context, microbenchfiles):
-    result = 0.0
+    microMap = {}
+    microMap['microbenchmarks'] = {}
     for f in microbenchfiles:
         with open(f) as inp:
             lines = csv.reader(inp)
@@ -31,11 +33,21 @@
             for line in lines:
                 if line[0] == 'name':
                     continue
-                # Note that we cannot create new tests here, so for now we just
-                # add up all the numbers here.
-                result += float(line[3])
-    return {'microbenchmark_time_ns': lit.Test.toMetricValue(result)}
+                # The aggregated '<name>_mean' row provides the exec_time.
+                # NOTE: --benchmark_repetitions is left for each test to set.
+                exec_time = float(line[3])  # benchmark's time unit, ns by default
+                data = {'exec_time': exec_time}
+                # The next row is expected to be the matching '<name>_stddev' row.
+                line2 = next(lines)
+                stddev = float(line2[3])
+                data['std_dev'] = stddev
+
+                # Strip the '_mean' suffix to recover the benchmark name.
+                name = line[0][:-5]
+                microMap['microbenchmarks'][name] = {'metrics': data}
+
+    return microMap


 def mutatePlan(context, plan):
     context.microbenchfiles = []
Index: lnt/tests/test_suite.py
===================================================================
--- lnt/tests/test_suite.py
+++ lnt/tests/test_suite.py
@@ -111,6 +111,22 @@
                           str)


+def _add_test_entry_to_suite(tests_by_suite, suite_name, test_name, path, time, code, output, metrics):
+    if metrics and 'microbenchmarks' in metrics:
+        for key, val in metrics['microbenchmarks'].items():
+            # Recurse so each microbenchmark gets its own test entry.
+            _add_test_entry_to_suite(tests_by_suite, suite_name, test_name + key, path, time, code, output, val.get('metrics'))
+
+    entry = {'name': test_name,
+             'path': '.'.join(path),
+             'time': time,
+             'code': code,
+             'metrics': metrics}
+    if code != "PASS":
+        entry['output'] = output
+
+    tests_by_suite[suite_name].append(entry)
+
 def _lit_json_to_template(json_reports, template_engine):
     # For now, only show first runs report.
     json_report = json_reports[0]
@@ -126,15 +142,8 @@
         test_name = x[1].strip().split("/")[-1]
         path = x[1].strip().split("/")[:-1]

-        entry = {'name': test_name,
-                 'path': '.'.join(path),
-                 'time': time,
-                 'code': code,
-                 'metrics': tests.get('metrics', None)}
-        if code != "PASS":
-            entry['output'] = output
-
-        tests_by_suite[suite_name].append(entry)
+        _add_test_entry_to_suite(tests_by_suite, suite_name, test_name, path, time, code, output, tests.get('metrics'))
+
     suites = []
     for id, suite in enumerate(tests_by_suite):
         tests = tests_by_suite[suite]
@@ -664,7 +673,7 @@
         return lnt.testing.util.compilers.get_cc_info(
             cmake_vars["CMAKE_C_COMPILER"], target_flags)

-    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
+    def process_test(self, code, name, raw_name, test_data, ignore, test_info, test_samples, profiles_to_import):
         LIT_METRIC_TO_LNT = {
             'compile_time': 'compile',
             'exec_time': 'exec',
@@ -682,6 +691,63 @@
             'size.__text': float,
         }

+        # If --single-result is given, exit based on --single-result-predicate
+        is_pass = self._is_pass_code(code)
+        no_errors = True
+        if self.opts.single_result and \
+           raw_name == self.opts.single_result + '.test':
+            env = {'status': is_pass}
+            if 'metrics' in test_data:
+                for k, v in test_data['metrics'].items():
+                    env[k] = v
+                    if k in LIT_METRIC_TO_LNT:
+                        env[LIT_METRIC_TO_LNT[k]] = v
+            status = eval(self.opts.single_result_predicate, {}, env)
+            sys.exit(0 if status else 1)
+
+        if 'metrics' in test_data:
+            for k, v in sorted(test_data['metrics'].items()):
+                if k == 'profile':
+                    profiles_to_import.append((name, v))
+                    continue
+
+                if k == 'microbenchmarks':
+                    for key, val in v.items():
+                        micro_name = name + '/' + key
+                        # Recurse so each microbenchmark reports its own samples.
+                        self.process_test(code, micro_name, raw_name, val, ignore, test_info, test_samples, profiles_to_import)
+
+                if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[k] in ignore:
+                    continue
+                server_name = name + '.' + LIT_METRIC_TO_LNT[k]
+
+                if k == 'link_time':
+                    # Move link time into a second benchmark's compile-time.
+                    server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]
+
+                test_samples.append(
+                    lnt.testing.TestSamples(server_name,
+                                            [v],
+                                            test_info,
+                                            LIT_METRIC_CONV_FN[k]))
+
+        if code == 'NOEXE':
+            test_samples.append(
+                lnt.testing.TestSamples(name + '.compile.status',
+                                        [lnt.testing.FAIL],
+                                        test_info))
+            no_errors = False
+
+        elif not is_pass:
+            test_samples.append(
+                lnt.testing.TestSamples(name + '.exec.status',
+                                        [self._get_lnt_code(test_data['code'])],
+                                        test_info))
+            no_errors = False
+
+        return no_errors
+
+    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
         # We don't use the test info, currently.
         test_info = {}
         test_samples = []
@@ -708,52 +774,10 @@
             name = name[:-5]
             name = 'nts.' + name

-            # If --single-result is given, exit based on --single-result-predicate
-            is_pass = self._is_pass_code(code)
-            if self.opts.single_result and \
-               raw_name == self.opts.single_result + '.test':
-                env = {'status': is_pass}
-                if 'metrics' in test_data:
-                    for k, v in test_data['metrics'].items():
-                        env[k] = v
-                        if k in LIT_METRIC_TO_LNT:
-                            env[LIT_METRIC_TO_LNT[k]] = v
-                status = eval(self.opts.single_result_predicate, {}, env)
-                sys.exit(0 if status else 1)
-
-            if 'metrics' in test_data:
-                for k, v in sorted(test_data['metrics'].items()):
-                    if k == 'profile':
-                        profiles_to_import.append((name, v))
-                        continue
-
-                    if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[k] in ignore:
-                        continue
-                    server_name = name + '.' + LIT_METRIC_TO_LNT[k]
-
-                    if k == 'link_time':
-                        # Move link time into a second benchmark's compile-time.
-                        server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]
-
-                    test_samples.append(
-                        lnt.testing.TestSamples(server_name,
-                                                [v],
-                                                test_info,
-                                                LIT_METRIC_CONV_FN[k]))
-
-            if code == 'NOEXE':
-                test_samples.append(
-                    lnt.testing.TestSamples(name + '.compile.status',
-                                            [lnt.testing.FAIL],
-                                            test_info))
-                no_errors = False
-
-            elif not is_pass:
-                test_samples.append(
-                    lnt.testing.TestSamples(name + '.exec.status',
-                                            [self._get_lnt_code(test_data['code'])],
-                                            test_info))
-                no_errors = False
+            if not self.process_test(code, name, raw_name, test_data, ignore,
+                                     test_info, test_samples,
+                                     profiles_to_import):
+                no_errors = False

         # Now import the profiles in parallel.
         if profiles_to_import:
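
For reference, here is a minimal, self-contained Python sketch (not part of the patch) of the aggregate CSV that the updated _collectMicrobenchmarkTime loop expects once --benchmark_report_aggregates_only=true is in effect, and of the nested {'microbenchmarks': {name: {'metrics': ...}}} shape that _add_test_entry_to_suite and process_test recurse over. The benchmark name and timings are invented, the helper collect() is illustrative only, and it assumes, as the patch does, that each '_stddev' row directly follows its '_mean' row.

import csv

# Hypothetical aggregate output from --benchmark_format=csv together with
# --benchmark_report_aggregates_only=true; names and numbers are invented.
SAMPLE_CSV = """name,iterations,real_time,cpu_time,time_unit
BM_ReturnInstrumentedPatched_mean,1000000,102.5,102.1,ns
BM_ReturnInstrumentedPatched_stddev,1000000,3.2,3.1,ns
"""


def collect(csv_text):
    """Mirror of the parsing loop added to _collectMicrobenchmarkTime."""
    micro = {'microbenchmarks': {}}
    lines = csv.reader(csv_text.splitlines())
    for line in lines:
        if line[0] == 'name':
            continue
        data = {'exec_time': float(line[3])}  # the '<name>_mean' row
        line2 = next(lines)                   # the following '<name>_stddev' row
        data['std_dev'] = float(line2[3])
        name = line[0][:-5]                   # strip the '_mean' suffix
        micro['microbenchmarks'][name] = {'metrics': data}
    return micro


print(collect(SAMPLE_CSV))
# -> {'microbenchmarks': {'BM_ReturnInstrumentedPatched':
#        {'metrics': {'exec_time': 102.1, 'std_dev': 3.1}}}}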