diff --git a/lld/test/CMakeLists.txt b/lld/test/CMakeLists.txt
--- a/lld/test/CMakeLists.txt
+++ b/lld/test/CMakeLists.txt
@@ -33,13 +33,15 @@
   )
 endif()
 
+set(LLD_ADDITIONAL_TESTS "")
 if (LLVM_INCLUDE_TESTS)
-  list(APPEND LLD_TEST_DEPS LLDUnitTests)
+  list(APPEND LLD_ADDITIONAL_TESTS LLDUnitTests)
+  list(APPEND LLD_ADDITIONAL_TESTS check-lld-benchmark)
 endif()
 
 add_lit_testsuite(check-lld "Running lld test suite"
   ${CMAKE_CURRENT_BINARY_DIR}
-  DEPENDS ${LLD_TEST_DEPS}
+  DEPENDS ${LLD_TEST_DEPS} ${LLD_ADDITIONAL_TESTS}
   )
 
 add_custom_target(lld-test-depends DEPENDS ${LLD_TEST_DEPS})
diff --git a/lld/unittests/CMakeLists.txt b/lld/unittests/CMakeLists.txt
--- a/lld/unittests/CMakeLists.txt
+++ b/lld/unittests/CMakeLists.txt
@@ -14,3 +14,10 @@
 
 add_subdirectory(DriverTests)
 add_subdirectory(MachOTests)
+
+add_custom_target(check-lld-benchmark
+  COMMAND ${CMAKE_COMMAND} -E env
+          PYTHONPATH=${CMAKE_CURRENT_SOURCE_DIR}/../utils
+          "${Python3_EXECUTABLE}" -m unittest test_benchmark.py
+  WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+set_target_properties(check-lld-benchmark PROPERTIES FOLDER "lld tests")
diff --git a/lld/unittests/test_benchmark.py b/lld/unittests/test_benchmark.py
new file mode 100644
--- /dev/null
+++ b/lld/unittests/test_benchmark.py
@@ -0,0 +1,53 @@
+import unittest
+
+import benchmark
+
+
+class TestBenchmark(unittest.TestCase):
+    def test_parse_perf_run(self):
+        with_units = b"""
+ Performance counter stats for 'ld64.lld.darwinnew @response.txt':
+
+          4,307.46 msec task-clock                #    0.999 CPUs utilized
+                70      context-switches          #    0.016 K/sec
+                 1      cpu-migrations            #    0.000 K/sec
+            53,770      page-faults               #    0.012 M/sec
+                        cycles
+                        instructions
+                        branches
+                        branch-misses
+
+       4.310496496 seconds time elapsed
+
+       4.048564000 seconds user
+       0.258972000 seconds sys
+"""
+
+        without_units = b"""
+ Performance counter stats for 'ld64.lld.darwinnew @response.txt':
+
+          4,307.46      task-clock                #    0.999 CPUs utilized
+                70      context-switches          #    0.016 K/sec
+                 1      cpu-migrations            #    0.000 K/sec
+            53,770      page-faults               #    0.012 M/sec
+                        cycles
+                        instructions
+                        branches
+                        branch-misses
+
+       4.310496496 seconds time elapsed
+
+       4.048564000 seconds user
+       0.258972000 seconds sys
+"""
+
+        expected = {
+            "seconds-elapsed": 4.310496496,
+            "task-clock": 4307.46,
+            "context-switches": 70,
+            "cpu-migrations": 1,
+            "page-faults": 53770,
+        }
+
+        self.assertEqual(benchmark.parsePerf(with_units), expected)
+        self.assertEqual(benchmark.parsePerf(without_units), expected)
diff --git a/lld/utils/benchmark.py b/lld/utils/benchmark.py
--- a/lld/utils/benchmark.py
+++ b/lld/utils/benchmark.py
@@ -31,7 +31,7 @@
 parser.add_argument('--threads', action='store_true')
 parser.add_argument('--url', help='The lnt server url to send the results to',
                     default='http://localhost:8000/db_default/v4/link/submitRun')
-args = parser.parse_args()
+args = None  # assigned in __main__; keeps this module importable from tests
 
 class Bench:
     def __init__(self, directory, variant):
@@ -62,7 +62,8 @@
         line = line.split(b'#')[0].strip()
         if len(line) != 0:
             p = line.split()
-            ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
+            metric_name = p[1 if len(p) < 3 else 2].strip()  # skip unit column
+            ret[metric_name.decode('ascii')] = parsePerfNum(p[0])
     return ret
 
 def parsePerf(output):
@@ -136,6 +137,8 @@
     data2 = urlencode({ 'input_data' : data }).encode('ascii')
     urlopen(Request(args.url, data2))
 
-os.chdir(args.benchmark_directory)
-data = buildLntJson(getBenchmarks())
-submitToServer(data)
+if __name__ == '__main__':
+    args = parser.parse_args()
+    os.chdir(args.benchmark_directory)
+    data = buildLntJson(getBenchmarks())
+    submitToServer(data)