Index: MicroBenchmarks/XRay/FDRMode/CMakeLists.txt =================================================================== --- MicroBenchmarks/XRay/FDRMode/CMakeLists.txt +++ MicroBenchmarks/XRay/FDRMode/CMakeLists.txt @@ -4,20 +4,7 @@ list(APPEND CPPFLAGS -std=c++11 -Wl,--gc-sections -fxray-instrument) list(APPEND LDFLAGS -fxray-instrument) - llvm_test_run(--benchmark_filter=dummy_skip_ignore) + llvm_test_run() llvm_test_executable(fdrmode-bench fdrmode-bench.cc) target_link_libraries(fdrmode-bench benchmark) - - file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_1_thread.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_2_thread.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_4_thread.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_8_thread.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_16_thread.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_32_thread.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) endif() Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_16_thread.test =================================================================== --- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_16_thread.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:16$ Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_1_thread.test =================================================================== --- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_1_thread.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:1$ 
Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_2_thread.test =================================================================== --- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_2_thread.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:2$ Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_32_thread.test =================================================================== --- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_32_thread.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:32$ Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_4_thread.test =================================================================== --- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_4_thread.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:4$ Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_8_thread.test =================================================================== --- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_8_thread.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:8$ Index: MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt =================================================================== --- MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt +++ MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt @@ -4,20 +4,7 @@ list(APPEND CPPFLAGS -std=c++11 -Wl,--gc-sections -fxray-instrument) list(APPEND LDFLAGS 
-fxray-instrument) - llvm_test_run(--benchmark_filter=dummy_skip_ignore) + llvm_test_run() llvm_test_executable(retref-bench retref-bench.cc) target_link_libraries(retref-bench benchmark) - - file(COPY retref-bench_BM_ReturnNeverInstrumented.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY retref-bench_BM_ReturnInstrumentedUnPatched.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY retref-bench_BM_ReturnInstrumentedPatched.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY retref-bench_BM_RDTSCP_Cost.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - file(COPY retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) endif() Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_RDTSCP_Cost.test =================================================================== --- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_RDTSCP_Cost.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_RDTSCP_Cost Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatched.test =================================================================== --- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatched.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnInstrumentedPatched Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test =================================================================== --- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true 
--benchmark_filter=BM_ReturnInstrumentedPatchedThenUnpatched Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test =================================================================== --- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnInstrumentedPatchedWithLogHandler Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedUnPatched.test =================================================================== --- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedUnPatched.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnInstrumentedUnPatched Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnNeverInstrumented.test =================================================================== --- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnNeverInstrumented.test +++ /dev/null @@ -1 +0,0 @@ -RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnNeverInstrumented Index: litsupport/modules/microbenchmark.py =================================================================== --- litsupport/modules/microbenchmark.py +++ litsupport/modules/microbenchmark.py @@ -23,7 +23,6 @@ def _collectMicrobenchmarkTime(context, microbenchfiles): - result = 0.0 for f in microbenchfiles: with open(f) as inp: lines = csv.reader(inp) @@ -31,10 +30,19 @@ for line in lines: if line[0] == 'name': continue - # Note that we cannot create new tests here, so for now we just - # add up all the numbers here. 
- result += float(line[3]) - return {'microbenchmark_time_ns': lit.Test.toMetricValue(result)} + # Name for MicroBenchmark + name = line[0] + # Create Result object with PASS + microBenchmark = lit.Test.Result(lit.Test.PASS) + + # Index 3 is cpu_time + microBenchmark.addMetric('exec_time', lit.Test.toMetricValue(float(line[3]))) + + # Add Micro Result + context.micro_results[name] = microBenchmark + + # Return the number of microbenchmarks collected as a metric for the base test + return ({'MicroBenchmarks': lit.Test.toMetricValue(len(context.micro_results))}) def mutatePlan(context, plan): Index: litsupport/testplan.py =================================================================== --- litsupport/testplan.py +++ litsupport/testplan.py @@ -143,6 +143,7 @@ lit.test.Result() object for the results.""" context.result_output = "" context.result_metrics = {} + context.micro_results = {} result_code = _executePlan(context, testplan) @@ -150,6 +151,9 @@ result = lit.Test.Result(result_code, context.result_output) for key, value in context.result_metrics.items(): result.addMetric(key, value) + for key, value in context.micro_results.items(): + result.addMicroResult(key, value) + return result