Index: MicroBenchmarks/CMakeLists.txt
===================================================================
--- MicroBenchmarks/CMakeLists.txt
+++ MicroBenchmarks/CMakeLists.txt
@@ -1,2 +1,5 @@
+file(COPY lit.local.cfg DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
+
 add_subdirectory(libs)
 add_subdirectory(XRay)
+add_subdirectory(LCALS)
Index: MicroBenchmarks/XRay/FDRMode/CMakeLists.txt
===================================================================
--- MicroBenchmarks/XRay/FDRMode/CMakeLists.txt
+++ MicroBenchmarks/XRay/FDRMode/CMakeLists.txt
@@ -1,23 +1,8 @@
 check_cxx_compiler_flag(-fxray-instrument COMPILER_HAS_FXRAY_INSTRUMENT)
 if(ARCH STREQUAL "x86" AND COMPILER_HAS_FXRAY_INSTRUMENT)
-  file(COPY lit.local.cfg DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-
   list(APPEND CPPFLAGS -std=c++11 -Wl,--gc-sections -fxray-instrument)
   list(APPEND LDFLAGS -fxray-instrument)
-  llvm_test_run(--benchmark_filter=dummy_skip_ignore)
+  llvm_test_run()
   llvm_test_executable(fdrmode-bench fdrmode-bench.cc)
   target_link_libraries(fdrmode-bench benchmark)
-
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_1_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_2_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_4_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_8_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_16_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY fdrmode-bench_BM_XRayFDRMultiThreaded_32_thread.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 endif()
Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_16_thread.test
===================================================================
--- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_16_thread.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:16$
Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_1_thread.test
===================================================================
--- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_1_thread.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:1$
Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_2_thread.test
===================================================================
--- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_2_thread.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:2$
Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_32_thread.test
===================================================================
--- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_32_thread.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:32$
Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_4_thread.test
===================================================================
--- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_4_thread.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:4$
Index: MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_8_thread.test
===================================================================
--- MicroBenchmarks/XRay/FDRMode/fdrmode-bench_BM_XRayFDRMultiThreaded_8_thread.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/fdrmode-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_XRayFDRMultiThreaded/threads:8$
Index: MicroBenchmarks/XRay/FDRMode/lit.local.cfg
===================================================================
--- MicroBenchmarks/XRay/FDRMode/lit.local.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-config.environment['XRAY_OPTIONS'] = 'patch_premain=false xray_naive_log=false xray_fdr_log=true'
-test_modules = config.test_modules
-if 'run' in test_modules:
-    # Insert microbenchmark module behind 'run'
-    test_modules.insert(test_modules.index('run')+1, 'microbenchmark')
-    # Timeit results are not useful for microbenchmarks
-    if 'timeit' in test_modules:
-        test_modules.remove('timeit')
Index: MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt
+++ MicroBenchmarks/XRay/ReturnReference/CMakeLists.txt
@@ -1,23 +1,8 @@
 check_cxx_compiler_flag(-fxray-instrument COMPILER_HAS_FXRAY_INSTRUMENT)
 if(ARCH STREQUAL "x86" AND COMPILER_HAS_FXRAY_INSTRUMENT)
-  file(COPY lit.local.cfg DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-
   list(APPEND CPPFLAGS -std=c++11 -Wl,--gc-sections -fxray-instrument)
   list(APPEND LDFLAGS -fxray-instrument)
-  llvm_test_run(--benchmark_filter=dummy_skip_ignore)
+  llvm_test_run()
   llvm_test_executable(retref-bench retref-bench.cc)
   target_link_libraries(retref-bench benchmark)
-
-  file(COPY retref-bench_BM_ReturnNeverInstrumented.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedUnPatched.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatched.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_RDTSCP_Cost.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
-  file(COPY retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test
-       DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 endif()
Index: MicroBenchmarks/XRay/ReturnReference/lit.local.cfg
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/lit.local.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-config.environment['XRAY_OPTIONS'] = 'patch_premain=false xray_naive_log=false'
-test_modules = config.test_modules
-if 'run' in test_modules:
-    # Insert microbenchmark module behind 'run'
-    test_modules.insert(test_modules.index('run')+1, 'microbenchmark')
-    # Timeit results are not useful for microbenchmarks
-    if 'timeit' in test_modules:
-        test_modules.remove('timeit')
Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_RDTSCP_Cost.test
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_RDTSCP_Cost.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_RDTSCP_Cost
Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatched.test
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatched.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnInstrumentedPatched
Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedThenUnpatched.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnInstrumentedPatchedThenUnpatched
Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedPatchedWithLogHandler.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnInstrumentedPatchedWithLogHandler
Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedUnPatched.test
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnInstrumentedUnPatched.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnInstrumentedUnPatched
Index: MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnNeverInstrumented.test
===================================================================
--- MicroBenchmarks/XRay/ReturnReference/retref-bench_BM_ReturnNeverInstrumented.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %S/retref-bench --benchmark_repetitions=10 --benchmark_report_aggregates_only=true --benchmark_filter=BM_ReturnNeverInstrumented
Index: litsupport/modules/microbenchmark.py
===================================================================
--- litsupport/modules/microbenchmark.py
+++ litsupport/modules/microbenchmark.py
@@ -23,7 +23,6 @@
 
 
 def _collectMicrobenchmarkTime(context, microbenchfiles):
-    result = 0.0
     for f in microbenchfiles:
         with open(f) as inp:
             lines = csv.reader(inp)
@@ -31,10 +30,19 @@
             for line in lines:
                 if line[0] == 'name':
                     continue
-                # Note that we cannot create new tests here, so for now we just
-                # add up all the numbers here.
-                result += float(line[3])
-    return {'microbenchmark_time_ns': lit.Test.toMetricValue(result)}
+                # The benchmark name is in the first CSV column.
+                name = line[0]
+                # Create a per-benchmark lit Result object marked PASS.
+                microBenchmark = lit.Test.Result(lit.Test.PASS)
+
+                # CSV column index 3 holds the cpu_time value.
+                microBenchmark.addMetric('exec_time', lit.Test.toMetricValue(float(line[3])))
+
+                # Record the per-benchmark result, keyed by benchmark name.
+                context.micro_results[name] = microBenchmark
+
+    # Return the number of microbenchmarks collected as a metric on the base test.
+    return ({'MicroBenchmarks': lit.Test.toMetricValue(len(context.micro_results))})
 
 
 def mutatePlan(context, plan):
Index: litsupport/testplan.py
===================================================================
--- litsupport/testplan.py
+++ litsupport/testplan.py
@@ -143,6 +143,7 @@
     lit.test.Result() object for the results."""
     context.result_output = ""
     context.result_metrics = {}
+    context.micro_results = {}
 
     result_code = _executePlan(context, testplan)
 
@@ -150,6 +151,9 @@
     result = lit.Test.Result(result_code, context.result_output)
     for key, value in context.result_metrics.items():
         result.addMetric(key, value)
+    for key, value in context.micro_results.items():
+        result.addMicroResult(key, value)
+
     return result