Index: CMakeLists.txt
===================================================================
--- CMakeLists.txt
+++ CMakeLists.txt
@@ -146,6 +146,7 @@
 add_subdirectory(SingleSource)
 add_subdirectory(MultiSource)
 add_subdirectory(External)
+add_subdirectory(MicroBenchmarks)
 
 # Produce lit.site.cfg
 configure_file("${PROJECT_SOURCE_DIR}/lit.site.cfg.in" "${CMAKE_BINARY_DIR}/lit.site.cfg")
Index: MicroBenchmarks/CMakeLists.txt
===================================================================
--- /dev/null
+++ MicroBenchmarks/CMakeLists.txt
@@ -0,0 +1,19 @@
+find_package(GoogleBenchmark)
+
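+# MicroBenchmark(name sources...): builds a Google Benchmark executable from
+# the given sources and registers it with lit via the llvm_* test helpers.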
+macro(MicroBenchmark executable)
+  llvm_test_run()
+  add_executable(${executable} ${ARGN})
+  target_link_libraries(${executable} ${GOOGLE_BENCHMARK_LIBRARIES})
+  llvm_add_test(${executable} ${executable})
+endmacro()
+
+if(GOOGLE_BENCHMARK_FOUND)
+  include_directories(${GOOGLE_BENCHMARK_INCLUDE_DIR})
+  file(COPY lit.local.cfg DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+
+  MicroBenchmark(basic_test basic_test.cc)
+  MicroBenchmark(filter_test filter_test.cc)
+  MicroBenchmark(simpleb simpleb.cc)
+endif()
Index: MicroBenchmarks/basic_test.cc
===================================================================
--- /dev/null
+++ MicroBenchmarks/basic_test.cc
@@ -0,0 +1,105 @@
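+// Basic microbenchmarks exercising the Google Benchmark timing loop: empty
+// KeepRunning() loops, spin loops over the benchmark argument (range_x()),
+// and PauseTiming()/ResumeTiming() overhead.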
+
+#include "benchmark/benchmark_api.h"
+
+#define BASIC_BENCHMARK_TEST(x) \
+    BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
+
+void BM_empty(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+}
+BENCHMARK(BM_empty);
+BENCHMARK(BM_empty)->ThreadPerCpu();
+
+void BM_spin_empty(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    for (int x = 0; x < state.range_x(); ++x) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_empty);
+BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
+
+void BM_spin_pause_before(benchmark::State& state) {
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+  while(state.KeepRunning()) {
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_before);
+BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
+
+
+void BM_spin_pause_during(benchmark::State& state) {
+  while(state.KeepRunning()) {
+    state.PauseTiming();
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+    state.ResumeTiming();
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_during);
+BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
+
+void BM_pause_during(benchmark::State& state) {
+  while(state.KeepRunning()) {
+    state.PauseTiming();
+    state.ResumeTiming();
+  }
+}
+BENCHMARK(BM_pause_during);
+BENCHMARK(BM_pause_during)->ThreadPerCpu();
+BENCHMARK(BM_pause_during)->UseRealTime();
+BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
+
+void BM_spin_pause_after(benchmark::State& state) {
+  while(state.KeepRunning()) {
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_after);
+BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
+
+
+void BM_spin_pause_before_and_after(benchmark::State& state) {
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+  while(state.KeepRunning()) {
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
+BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
+
+
+void BM_empty_stop_start(benchmark::State& state) {
+  while (state.KeepRunning()) { }
+}
+BENCHMARK(BM_empty_stop_start);
+BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
+
+BENCHMARK_MAIN()
Index: MicroBenchmarks/filter_test.cc
===================================================================
--- /dev/null
+++ MicroBenchmarks/filter_test.cc
@@ -0,0 +1,95 @@
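+// Runs a handful of trivially named benchmarks through a reporter that
+// counts how many benchmarks were reported.  When an expected count is
+// passed on the command line (typically combined with --benchmark_filter),
+// main() verifies that exactly that many benchmarks ran.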
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+
+#include <iostream>
+#include <limits>
+#include <sstream>
+#include <string>
+
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) {
+    return ConsoleReporter::ReportContext(context);
+  }
+
+  virtual void ReportRuns(const std::vector<Run>& report) {
+    ++count_;
+    ConsoleReporter::ReportRuns(report);
+  }
+
+  TestReporter() : count_(0) {}
+
+  virtual ~TestReporter() {}
+
+  size_t GetCount() const {
+    return count_;
+  }
+
+ private:
+  mutable size_t count_;
+};
+
+}  // end namespace
+
+
+static void NoPrefix(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(NoPrefix);
+
+static void BM_Foo(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_Foo);
+
+
+static void BM_Bar(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_Bar);
+
+
+static void BM_FooBar(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_FooBar);
+
+
+static void BM_FooBa(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_FooBa);
+
+
+
+int main(int argc, char* argv[]) {
+  benchmark::Initialize(&argc, argv);
+
+  TestReporter test_reporter;
+  benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+  if (argc == 2) {
+    // Make sure we ran all of the tests
+    std::stringstream ss(argv[1]);
+    size_t expected;
+    ss >> expected;
+
+    const size_t count = test_reporter.GetCount();
+    if (count != expected) {
+      std::cerr << "ERROR: Expected " << expected << " tests to be run but only "
+                << count << " completed" << std::endl;
+      return -1;
+    }
+  }
+  return 0;
+}
Index: MicroBenchmarks/lit.local.cfg
===================================================================
--- /dev/null
+++ MicroBenchmarks/lit.local.cfg
@@ -0,0 +1 @@
+config.microbenchmark = True
Index: MicroBenchmarks/simpleb.cc
===================================================================
--- /dev/null
+++ MicroBenchmarks/simpleb.cc
@@ -0,0 +1,16 @@
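+// Minimal single-benchmark smoke test for the Google Benchmark lit integration.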
+
+#include "benchmark/benchmark_api.h"
+
+#define BASIC_BENCHMARK_TEST(x) \
+    BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
+
+void BM_empty(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+}
+BENCHMARK(BM_empty);
+BENCHMARK(BM_empty)->ThreadPerCpu();
+
+BENCHMARK_MAIN()
Index: cmake/FindGoogleBenchmark.cmake
===================================================================
--- /dev/null
+++ cmake/FindGoogleBenchmark.cmake
@@ -0,0 +1,11 @@
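+# Find the Google Benchmark library (https://github.com/google/benchmark).
+# Sets GOOGLE_BENCHMARK_FOUND, GOOGLE_BENCHMARK_INCLUDE_DIR and
+# GOOGLE_BENCHMARK_LIBRARIES.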
+include(FindPackageHandleStandardArgs)
+
+find_path(GOOGLE_BENCHMARK_INCLUDE_DIR benchmark/benchmark.h)
+find_library(GOOGLE_BENCHMARK_LIBRARIES NAMES benchmark libbenchmark)
+
+find_package_handle_standard_args(GOOGLE_BENCHMARK
+  REQUIRED_VARS GOOGLE_BENCHMARK_LIBRARIES GOOGLE_BENCHMARK_INCLUDE_DIR)
+mark_as_advanced(GOOGLE_BENCHMARK_LIBRARIES GOOGLE_BENCHMARK_INCLUDE_DIR)
Index: lit.cfg
===================================================================
--- lit.cfg
+++ lit.cfg
@@ -10,6 +10,9 @@
 config.excludes = ['ABI-Testsuite']
 config.remote_flags = ""
 config.traditional_output = True
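+# Overridden to True in directories that contain Google Benchmark based
+# microbenchmarks (see MicroBenchmarks/lit.local.cfg).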
+config.microbenchmark = False
 if 'SSH_AUTH_SOCK' in os.environ:
     config.environment['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK']
 
Index: litsupport/googlebenchmark.py
===================================================================
--- /dev/null
+++ litsupport/googlebenchmark.py
@@ -0,0 +1,43 @@
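+"""Test plan mutator for Google Benchmark based microbenchmarks: runs the
+benchmark with --benchmark_format=csv and collects the per-benchmark
+iteration counts and real_time values as lit metrics."""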
+import testplan
+import logging
+import lit.Test
+
+
+def mutateCommandLine(context, commandline):
+    benchmarkfile = context.tmpBase + ".benchmark.csv"
+    context.benchmarkfile = benchmarkfile
+    return commandline + " --benchmark_format=csv > %s" % benchmarkfile
+
+
+def _collectBenchmarkResult(context, benchmarkfile):
+    metrics = {}
+    result = open(benchmarkfile, "r").read()
+    found_header = False
+    for line in result.splitlines():
+        if not found_header:
+            if "name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label" in line:
+                found_header = True
+            continue
+
+        try:
+            line = line.strip()
+            values = line.split(",")
+            logging.debug("Values: %s", values)
+            name = values[0]
+            metrics["%s.iterations" % name] = lit.Test.toMetricValue(int(values[1]))
+            metrics["%s.real_time" % name] = lit.Test.toMetricValue(float(values[2]))
+        except (ValueError, IndexError):
+            logging.warning("%s: Could not interpret line '%s'", benchmarkfile, line)
+            continue
+    return metrics
+
+
+def mutatePlan(context, plan):
+    plan.runscript = testplan.mutateScript(context, plan.runscript,
+                                           mutateCommandLine)
+    plan.metric_collectors.append(
+        lambda context: _collectBenchmarkResult(context, context.benchmarkfile)
+    )
Index: litsupport/test.py
===================================================================
--- litsupport/test.py
+++ litsupport/test.py
@@ -10,6 +10,7 @@
 
 import codesize
 import compiletime
+import googlebenchmark
 import hash
 import perf
 import profilegen
@@ -78,17 +79,24 @@
         lit.util.mkdir_p(os.path.dirname(tmpBase))
 
         # Prepare test plan
-        run_under.mutatePlan(context, plan)
-        timeit.mutatePlan(context, plan)
-        compiletime.mutatePlan(context, plan)
-        codesize.mutatePlan(context, plan)
-        hash.mutatePlan(context, plan)
-        if config.profile_generate:
-            profilegen.mutatePlan(context, plan)
-        if config.remote_host:
-            remote.mutatePlan(context, plan)
-        if litConfig.params.get('profile') == 'perf':
-            perf.mutatePlan(context, plan)
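+        # Microbenchmarks are timed by the Google Benchmark library itself,
+        # so skip the generic run_under/timeit/codesize/hash collectors.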
+        if test.config.microbenchmark:
+            googlebenchmark.mutatePlan(context, plan)
+            if config.remote_host:
+                remote.mutatePlan(context, plan)
+        else:
+            run_under.mutatePlan(context, plan)
+            timeit.mutatePlan(context, plan)
+            compiletime.mutatePlan(context, plan)
+            codesize.mutatePlan(context, plan)
+            hash.mutatePlan(context, plan)
+            if config.profile_generate:
+                profilegen.mutatePlan(context, plan)
+            if config.remote_host:
+                remote.mutatePlan(context, plan)
+            if litConfig.params.get('profile') == 'perf':
+                perf.mutatePlan(context, plan)
 
         # Execute Test plan
         result = testplan.executePlanTestResult(context, plan)