Index: llvm/utils/lit/lit/cl_arguments.py
===================================================================
--- llvm/utils/lit/lit/cl_arguments.py
+++ llvm/utils/lit/lit/cl_arguments.py
@@ -69,6 +69,11 @@
             type=lit.reports.JsonReport,
             help="Write test results to the provided path",
             metavar="PATH")
+    format_group.add_argument("--output-format",
+            dest="output_format",
+            choices=["json", "resultdb"],
+            default="json",
+            help="Set the output format, default is \"json\"")
     format_group.add_argument("--no-progress-bar",
             dest="useProgressBar",
             help="Do not use curses based progress bar",
Index: llvm/utils/lit/lit/main.py
===================================================================
--- llvm/utils/lit/lit/main.py
+++ llvm/utils/lit/lit/main.py
@@ -114,7 +114,10 @@
     print_results(discovered_tests, elapsed, opts)
 
     for report in opts.reports:
-        report.write_results(tests_for_report, elapsed)
+        if opts.output_format == "json":
+            report.write_results(tests_for_report, elapsed)
+        elif opts.output_format == "resultdb":
+            report.write_resultdb_json(tests_for_report, elapsed)
 
     if lit_config.numErrors:
         sys.stderr.write('\n%d error(s) in tests\n' % lit_config.numErrors)
Index: llvm/utils/lit/lit/reports.py
===================================================================
--- llvm/utils/lit/lit/reports.py
+++ llvm/utils/lit/lit/reports.py
@@ -1,3 +1,5 @@
+import base64
+import datetime
 import itertools
 import json
@@ -11,11 +13,89 @@
     # key to avoid mixing tests of different suites.
     return (test.suite.name, id(test.suite), test.path_in_suite)
 
 
+def gen_resultdb_test_entry(
+    test_name, start_time, elapsed_time, test_output, result_code, is_expected
+):
+    test_data = {
+        'testId': test_name,
+        'start_time': datetime.datetime.fromtimestamp(start_time).isoformat() + 'Z',
+        'duration': '%.9fs' % elapsed_time,
+        'summary_html': '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
+        'artifacts': {
+            'artifact-content-in-request': {
+                'contents': base64.b64encode(test_output.encode('utf-8')).decode(
+                    'utf-8'
+                ),
+            },
+        },
+        'expected': is_expected,
+    }
+    if (
+        result_code == lit.Test.PASS
+        or result_code == lit.Test.XPASS
+        or result_code == lit.Test.FLAKYPASS
+    ):
+        test_data['status'] = 'PASS'
+    elif result_code == lit.Test.FAIL or result_code == lit.Test.XFAIL:
+        test_data['status'] = 'FAIL'
+    elif (
+        result_code == lit.Test.UNSUPPORTED
+        or result_code == lit.Test.SKIPPED
+        or result_code == lit.Test.EXCLUDED
+    ):
+        test_data['status'] = 'SKIP'
+    elif result_code == lit.Test.UNRESOLVED or result_code == lit.Test.TIMEOUT:
+        test_data['status'] = 'ABORT'
+    return test_data
+
 class JsonReport(object):
     def __init__(self, output_file):
         self.output_file = output_file
 
+    def write_resultdb_json(self, tests, elapsed):
+        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
+        tests = [t for t in tests if t.result.code not in unexecuted_codes]
+        data = {}
+        data['__version__'] = lit.__versioninfo__
+        data['elapsed'] = elapsed
+        # Encode the tests.
+        data['tests'] = tests_data = []
+        for test in tests:
+            tests_data.append(
+                gen_resultdb_test_entry(
+                    test_name=test.getFullName(),
+                    start_time=test.result.start,
+                    elapsed_time=test.result.elapsed,
+                    test_output=test.result.output,
+                    result_code=test.result.code,
+                    is_expected=not test.result.code.isFailure,
+                )
+            )
+            if test.result.microResults:
+                for key, micro_test in test.result.microResults.items():
+                    # Expand parent test name with micro test name
+                    parent_name = test.getFullName()
+                    micro_full_name = parent_name + ':' + key + 'microres'
+                    tests_data.append(
+                        gen_resultdb_test_entry(
+                            test_name=micro_full_name,
+                            start_time=micro_test.start
+                            if micro_test.start
+                            else test.result.start,
+                            elapsed_time=micro_test.elapsed
+                            if micro_test.elapsed
+                            else test.result.elapsed,
+                            test_output=micro_test.output,
+                            result_code=micro_test.code,
+                            is_expected=not micro_test.code.isFailure,
+                        )
+                    )
+
+        with open(self.output_file, 'w') as file:
+            json.dump(data, file, indent=2, sort_keys=True)
+            file.write('\n')
+
     def write_results(self, tests, elapsed):
         unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
         tests = [t for t in tests if t.result.code not in unexecuted_codes]
Index: llvm/utils/lit/tests/test-output-micro-resultdb.py
===================================================================
--- /dev/null
+++ llvm/utils/lit/tests/test-output-micro-resultdb.py
@@ -0,0 +1,63 @@
+# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out --output-format=resultdb
+# RUN: FileCheck < %t.results.out %s
+# RUN: rm %t.results.out
+
+
+# CHECK: {
+# CHECK: "__version__"
+# CHECK: "elapsed"
+# CHECK-NEXT: "tests": [
+# CHECK-NEXT: {
+# CHECK-NEXT: "artifacts": {
+# CHECK-NEXT: "artifact-content-in-request": {
+# CHECK-NEXT: "contents": "VGVzdCBwYXNzZWQu"
+# CHECK-NEXT: }
+# CHECK-NEXT: },
+# CHECK-NEXT: "duration"
+# CHECK-NEXT: "expected": true,
+# CHECK-NEXT: "start_time"
+# CHECK-NEXT: "status": "PASS",
+# CHECK-NEXT: "summary_html": "
",
+# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini"
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "artifacts": {
+# CHECK-NEXT: "artifact-content-in-request": {
+# CHECK-NEXT: "contents": ""
+# CHECK-NEXT: }
+# CHECK-NEXT: },
+# CHECK-NEXT: "duration"
+# CHECK-NEXT: "expected": true,
+# CHECK-NEXT: "start_time"
+# CHECK-NEXT: "status": "PASS",
+# CHECK-NEXT: "summary_html": "
",
+# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini:test0microres"
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "artifacts": {
+# CHECK-NEXT: "artifact-content-in-request": {
+# CHECK-NEXT: "contents": ""
+# CHECK-NEXT: }
+# CHECK-NEXT: },
+# CHECK-NEXT: "duration"
+# CHECK-NEXT: "expected": true,
+# CHECK-NEXT: "start_time"
+# CHECK-NEXT: "status": "PASS",
+# CHECK-NEXT: "summary_html": "
",
+# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini:test1microres"
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "artifacts": {
+# CHECK-NEXT: "artifact-content-in-request": {
+# CHECK-NEXT: "contents": ""
+# CHECK-NEXT: }
+# CHECK-NEXT: },
+# CHECK-NEXT: "duration"
+# CHECK-NEXT: "expected": true,
+# CHECK-NEXT: "start_time"
+# CHECK-NEXT: "status": "PASS",
+# CHECK-NEXT: "summary_html": "
",
+# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini:test2microres"
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }
Index: llvm/utils/lit/tests/test-output-resultdb.py
===================================================================
--- /dev/null
+++ llvm/utils/lit/tests/test-output-resultdb.py
@@ -0,0 +1,22 @@
+# RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out --output-format=resultdb > %t.out
+# RUN: FileCheck < %t.results.out %s
+
+# CHECK: {
+# CHECK: "__version__"
+# CHECK: "elapsed"
+# CHECK-NEXT: "tests": [
+# CHECK-NEXT: {
+# CHECK-NEXT: "artifacts": {
+# CHECK-NEXT: "artifact-content-in-request": {
+# CHECK-NEXT: "contents": "VGVzdCBwYXNzZWQu"
+# CHECK-NEXT: }
+# CHECK-NEXT: },
+# CHECK-NEXT: "duration"
+# CHECK-NEXT: "expected": true,
+# CHECK-NEXT: "start_time"
+# CHECK-NEXT: "status": "PASS",
+# CHECK-NEXT: "summary_html": "
",
+# CHECK-NEXT: "testId": "test-data :: metrics.ini"
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }