Index: MicroBenchmarks/XRay/FDRMode/CMakeLists.txt
===================================================================
--- MicroBenchmarks/XRay/FDRMode/CMakeLists.txt
+++ MicroBenchmarks/XRay/FDRMode/CMakeLists.txt
@@ -4,20 +4,7 @@
   list(APPEND CPPFLAGS -std=c++11 -Wl,--gc-sections -fxray-instrument)
   list(APPEND LDFLAGS -fxray-instrument)
 
-  llvm_test_run(--benchmark_filter=dummy_skip_ignore)
+  llvm_test_run(--benchmark_repetitions=10)
   llvm_test_executable(fdrmode-bench fdrmode-bench.cc)
   target_link_libraries(fdrmode-bench benchmark)
-
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_1_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_2_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_4_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_8_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_16_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_32_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 endif()
Index: MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt
+++ MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt
@@ -4,20 +4,7 @@
   list(APPEND CPPFLAGS -std=c++11 -Wl,--gc-sections -fxray-instrument)
   list(APPEND LDFLAGS -fxray-instrument)
 
-  llvm_test_run(--benchmark_filter=dummy_skip_ignore)
+  llvm_test_run(--benchmark_repetitions=10)
   llvm_test_executable(retref-bench retref-bench.cc)
   target_link_libraries(retref-bench benchmark)
-
-  file(COPY retref-bench_BM_ReturnNeverInstrumented.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedUnPatched.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatched.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_RDTSCP_Cost.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 endif()
Index: litsupport/modules/microbenchmark.py
===================================================================
--- litsupport/modules/microbenchmark.py
+++ litsupport/modules/microbenchmark.py
@@ -8,6 +8,7 @@
 def _mutateCommandLine(context, commandline):
     cmd = shellcommand.parse(commandline)
     cmd.arguments.append("--benchmark_format=csv")
+    cmd.arguments.append("--benchmark_report_aggregates_only=true")
     # We need stdout ourself to get the benchmark csv data.
     if cmd.stdout is not None:
         raise Exception("Rerouting stdout not allowed for microbenchmarks")
@@ -23,7 +24,6 @@
 
 
 def _collectMicrobenchmarkTime(context, microbenchfiles):
-    result = 0.0
     for f in microbenchfiles:
         with open(f) as inp:
             lines = csv.reader(inp)
@@ -31,10 +31,22 @@
             for line in lines:
                 if line[0] == 'name':
                     continue
-                # Note that we cannot create new tests here, so for now we just
-                # add up all the numbers here.
-                result += float(line[3])
-    return {'microbenchmark_time_ns': lit.Test.toMetricValue(result)}
+                # Name for MicroBenchmark
+                name = line[0][:-5]
+                # Create Result object with PASS
+                microBenchmark = lit.Test.Result(lit.Test.PASS)
+
+                # Use Mean as Reported Time. Index 3 is cpu_time
+                microBenchmark.addMetric('exec_time', lit.Test.toMetricValue(float(line[3])))
+                microBenchmark.addMetric('iterations', lit.Test.toMetricValue(int(line[1])))
+                medianLine = next(lines)
+                stdDevLine = next(lines)
+                microBenchmark.addMetric('std_dev', lit.Test.toMetricValue(float(stdDevLine[3])))
+
+                # Add Micro Result
+                context.micro_results[name] = microBenchmark
+
+    return ({'MicroBenchmarks': lit.Test.toMetricValue(len(context.micro_results))})
 
 
 def mutatePlan(context, plan):
Index: litsupport/testplan.py
===================================================================
--- litsupport/testplan.py
+++ litsupport/testplan.py
@@ -143,6 +143,7 @@
     lit.test.Result() object for the results."""
     context.result_output = ""
     context.result_metrics = {}
+    context.micro_results = {}
 
     result_code = _executePlan(context, testplan)
 
@@ -150,6 +151,9 @@
     result = lit.Test.Result(result_code, context.result_output)
     for key, value in context.result_metrics.items():
         result.addMetric(key, value)
+    for key, value in context.micro_results.items():
+        result.addMicroResult(key, value)
+
     return result
Index: utils/lit/lit/Test.py
===================================================================
--- utils/lit/lit/Test.py
+++ utils/lit/lit/Test.py
@@ -135,6 +135,8 @@
         self.elapsed = elapsed
         # The metrics reported by this test.
         self.metrics = {}
+        # The micro-test results reported by this test.
+        self.microResults = {}
 
     def addMetric(self, name, value):
         """
@@ -153,6 +155,24 @@
             raise TypeError("unexpected metric value: %r" % (value,))
         self.metrics[name] = value
 
+    def addMicroResult(self, name, microResult):
+        """
+        addMicroResult(name, microResult)
+
+        Attach a micro-test result to the test result, with the given name and
+        result. It is an error to attempt to attach a micro-test with the
+        same name multiple times.
+
+        Each micro-test result must be an instance of the Result class.
+        """
+        if name in self.microResults:
+            raise ValueError("Result already includes microResult for %r" % (
+                name,))
+        if not isinstance(microResult, Result):
+            raise TypeError("unexpected MicroResult value %r" % (microResult,))
+        self.microResults[name] = microResult
+
+
 # Test classes.
 
 class TestSuite:
Index: utils/lit/lit/main.py
===================================================================
--- utils/lit/lit/main.py
+++ utils/lit/lit/main.py
@@ -81,6 +81,17 @@
                 print('%s: %s ' % (metric_name, value.format()))
             print("*" * 10)
 
+        # Report micro-tests, if present
+        if test.result.microResults:
+            items = sorted(test.result.microResults.items())
+            for micro_test_name, micro_test in items:
+                print("%s MICRO-TEST '%s' RESULTS %s" %
+                      ('*'*3, micro_test_name, '*'*3))
+
+                for metric_name, value in micro_test.metrics.items():
+                    print('    %s: %s ' % (metric_name, value.format()))
+                print("*" * 10)
+
         # Ensure the output is flushed.
         sys.stdout.flush()
@@ -113,6 +124,23 @@
             for key, value in test.result.metrics.items():
                 metrics_data[key] = value.todata()
 
+        # Report micro-tests separately, if present
+        if test.result.microResults:
+            for key, micro_test in test.result.microResults.items():
+                micro_full_name = test.getFullName()[:-4] + key + ".test"
+
+                micro_test_data = {
+                    'name' : micro_full_name,
+                    'code' : micro_test.code.name,
+                    'output' : micro_test.output,
+                    'elapsed' : micro_test.elapsed }
+                if micro_test.metrics:
+                    micro_test_data['metrics'] = micro_metrics_data = {}
+                    for key, value in micro_test.metrics.items():
+                        micro_metrics_data[key] = value.todata()
+
+                tests_data.append(micro_test_data)
+
         tests_data.append(test_data)
 
         # Write the output.
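
Note for reviewers: below is a minimal usage sketch of the micro-result API this patch adds, showing how an entry collected into context.micro_results by _collectMicrobenchmarkTime() is attached to the parent test result in executePlanTestResult(). The benchmark name comes from this patch's test suite, but the metric values are made up for illustration; only Result, PASS, toMetricValue, addMetric, and the new addMicroResult are real API here.

    # Illustrative sketch only; assumes lit (with this patch applied) is importable.
    import lit.Test

    # Parent result, as built in litsupport/testplan.py.
    result = lit.Test.Result(lit.Test.PASS)

    # One Result per Google Benchmark aggregate row, as microbenchmark.py builds it.
    # Metric values are placeholders, not real measurements.
    micro = lit.Test.Result(lit.Test.PASS)
    micro.addMetric('exec_time', lit.Test.toMetricValue(123.4))      # mean cpu_time
    micro.addMetric('iterations', lit.Test.toMetricValue(1000000))
    micro.addMetric('std_dev', lit.Test.toMetricValue(1.7))

    # addMicroResult() raises ValueError on duplicate names and TypeError for
    # values that are not Result instances, per the Test.py change above.
    result.addMicroResult('BM_ReturnInstrumentedPatched', micro)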