Index: llvm/utils/lit/lit/Test.py
===================================================================
--- llvm/utils/lit/lit/Test.py
+++ llvm/utils/lit/lit/Test.py
@@ -150,6 +150,8 @@
         self.output = output
         # The wall timing to execute the test, if timing.
         self.elapsed = elapsed
+        self.start = None
+        self.pid = None
         # The metrics reported by this test.
         self.metrics = {}
         # The micro-test results reported by this test.
Index: llvm/utils/lit/lit/cl_arguments.py
===================================================================
--- llvm/utils/lit/lit/cl_arguments.py
+++ llvm/utils/lit/lit/cl_arguments.py
@@ -109,6 +109,9 @@
     execution_group.add_argument("--xunit-xml-output",
             type=lit.reports.XunitReport,
             help="Write XUnit-compatible XML test reports to the specified file")
+    execution_group.add_argument("--time-trace-output",
+            type=lit.reports.TimeTraceReport,
+            help="Write Chrome tracing compatible JSON to the specified file")
     execution_group.add_argument("--timeout",
             dest="maxIndividualTestTime",
             help="Maximum time to spend running a single test (in seconds). "
@@ -195,7 +198,7 @@
     else:
         opts.shard = None
 
-    opts.reports = filter(None, [opts.output, opts.xunit_xml_output])
+    opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.time_trace_output])
 
     return opts
 
Index: llvm/utils/lit/lit/reports.py
===================================================================
--- llvm/utils/lit/lit/reports.py
+++ llvm/utils/lit/lit/reports.py
@@ -136,3 +136,36 @@
         if features:
             return 'Missing required feature(s): ' + ', '.join(features)
         return 'Unsupported configuration'
+
+
+class TimeTraceReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+        self.skipped_codes = {lit.Test.EXCLUDED,
+                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
+
+    # TODO(yln): elapsed unused, put it somewhere?
+    def write_results(self, tests, elapsed):
+        events = [self._get_test_event(x) for x in tests]
+        first_timestamp = min(e['ts'] for e in events)
+        # Rebase all timestamps which were stored as absolute times
+        for e in events:
+            e['ts'] = e['ts'] - first_timestamp
+
+        json_struct = {'traceEvents': events}
+
+        with open(self.output_file, "w") as time_trace_file:
+            json.dump(json_struct, time_trace_file, indent=2)
+
+    def _get_test_event(self, test):
+        test_name = test.getFullName()
+        elapsed_time = test.result.elapsed if test.result.elapsed is not None else 0.0
+        start_time = test.result.start if test.result.start is not None else 0.0
+        pid = test.result.pid if test.result.pid is not None else 0
+        return {
+            'pid': pid,
+            'tid': 1,
+            'ph': 'X',
+            'ts': int(start_time * 1000000.),
+            'dur': int(elapsed_time * 1000000.),
+            'name': test_name,
+        }
Index: llvm/utils/lit/lit/worker.py
===================================================================
--- llvm/utils/lit/lit/worker.py
+++ llvm/utils/lit/lit/worker.py
@@ -6,6 +6,7 @@
 and store it in global variables. This reduces the cost of each task.
 """
 import contextlib
+import os
 import signal
 import time
 import traceback
@@ -65,6 +66,8 @@
     start = time.time()
     result = _execute_test_handle_errors(test, lit_config)
     result.elapsed = time.time() - start
+    result.start = start
+    result.pid = os.getpid()
     return result
 
 
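
Not part of the patch: a minimal, self-contained sketch of the Chrome tracing JSON that the new TimeTraceReport emits, i.e. one complete ('X') event per test with pid/tid/ts/dur/name fields, microsecond units, and timestamps rebased to the earliest start. The example_tests data and the time-trace.json filename below are made up for illustration; with the patch applied, an equivalent file would be produced by running lit with --time-trace-output <file>, and the result should be loadable in a Chrome-tracing-compatible viewer such as chrome://tracing.

import json

example_tests = [
    # (test name, pid of the worker that ran it, start time in seconds, elapsed seconds)
    ('suite :: test_a.py', 1001, 1700000000.00, 0.25),
    ('suite :: test_b.py', 1002, 1700000000.10, 1.50),
]

events = [{
    'pid': pid,
    'tid': 1,
    'ph': 'X',                     # "complete" event: has a start and a duration
    'ts': int(start * 1000000.),   # microseconds, still absolute at this point
    'dur': int(elapsed * 1000000.),
    'name': name,
} for name, pid, start, elapsed in example_tests]

# Rebase timestamps so the trace starts at zero, as write_results() does.
first_timestamp = min(e['ts'] for e in events)
for e in events:
    e['ts'] -= first_timestamp

with open('time-trace.json', 'w') as f:
    json.dump({'traceEvents': events}, f, indent=2)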