Index: llvm/utils/lit/lit/Test.py
===================================================================
--- llvm/utils/lit/lit/Test.py
+++ llvm/utils/lit/lit/Test.py
@@ -150,6 +150,8 @@
         self.output = output
         # The wall timing to execute the test, if timing.
         self.elapsed = elapsed
+        self.start = None
+        self.pid = None
         # The metrics reported by this test.
         self.metrics = {}
         # The micro-test results reported by this test.
Index: llvm/utils/lit/lit/cl_arguments.py
===================================================================
--- llvm/utils/lit/lit/cl_arguments.py
+++ llvm/utils/lit/lit/cl_arguments.py
@@ -109,6 +109,9 @@
     execution_group.add_argument("--xunit-xml-output",
             type=lit.reports.XunitReport,
             help="Write XUnit-compatible XML test reports to the specified file")
+    execution_group.add_argument("--time-trace-output",
+            type=lit.reports.TimeTraceReport,
+            help="Write Chrome tracing compatible JSON to the specified file")
     execution_group.add_argument("--timeout",
             dest="maxIndividualTestTime",
             help="Maximum time to spend running a single test (in seconds). "
@@ -195,7 +198,7 @@
     else:
         opts.shard = None

-    opts.reports = filter(None, [opts.output, opts.xunit_xml_output])
+    opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.time_trace_output])

     return opts
Index: llvm/utils/lit/lit/reports.py
===================================================================
--- llvm/utils/lit/lit/reports.py
+++ llvm/utils/lit/lit/reports.py
@@ -136,3 +136,35 @@
         if features:
             return 'Missing required feature(s): ' + ', '.join(features)
         return 'Unsupported configuration'
+
+
+class TimeTraceReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+        self.skipped_codes = {lit.Test.EXCLUDED,
+                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
+
+    def write_results(self, tests, elapsed):
+        # Find when first test started so we can make start times relative.
+        first_start_time = min([t.result.start for t in tests])
+        events = [self._get_test_event(
+            x, first_start_time) for x in tests if x.result.code not in self.skipped_codes]
+
+        json_data = {'traceEvents': events}
+
+        with open(self.output_file, "w") as time_trace_file:
+            json.dump(json_data, time_trace_file, indent=2, sort_keys=True)
+
+    def _get_test_event(self, test, first_start_time):
+        test_name = test.getFullName()
+        elapsed_time = test.result.elapsed or 0.0
+        start_time = test.result.start - first_start_time if test.result.start else 0.0
+        pid = test.result.pid or 0
+        return {
+            'pid': pid,
+            'tid': 1,
+            'ph': 'X',
+            'ts': int(start_time * 1000000.),
+            'dur': int(elapsed_time * 1000000.),
+            'name': test_name,
+        }
Index: llvm/utils/lit/lit/worker.py
===================================================================
--- llvm/utils/lit/lit/worker.py
+++ llvm/utils/lit/lit/worker.py
@@ -6,6 +6,7 @@
 and store it in global variables. This reduces the cost of each task.
 """
 import contextlib
+import os
 import signal
 import time
 import traceback
@@ -65,6 +66,8 @@
     start = time.time()
     result = _execute_test_handle_errors(test, lit_config)
     result.elapsed = time.time() - start
+    result.start = start
+    result.pid = os.getpid()
     return result
Index: llvm/utils/lit/tests/time-trace-output.py
===================================================================
--- /dev/null
+++ llvm/utils/lit/tests/time-trace-output.py
@@ -0,0 +1,15 @@
+# RUN: %{lit} -j 1 -v %{inputs}/test-data --time-trace-output %t.json > %t.out
+# RUN: FileCheck < %t.json %s
+
+# CHECK: {
+# CHECK-NEXT: "traceEvents": [
+# CHECK-NEXT: {
+# CHECK-NEXT: "dur":
+# CHECK-NEXT: "name": "test-data :: metrics.ini"
+# CHECK-NEXT: "ph": "X"
+# CHECK-NEXT: "pid"
+# CHECK-NEXT: "tid": 1
+# CHECK-NEXT: "ts":
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }