diff --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py
--- a/llvm/utils/lit/lit/cl_arguments.py
+++ b/llvm/utils/lit/lit/cl_arguments.py
@@ -65,12 +65,18 @@
             dest="useProgressBar",
             help="Do not use curses based progress bar",
             action="store_false")
-    format_group.add_argument("--show-unsupported",
-            help="Show unsupported tests",
-            action="store_true")
-    format_group.add_argument("--show-xfail",
-            help="Show tests that were expected to fail",
-            action="store_true")
+
+    # Note: this does not generate flags for user-defined result codes.
+    success_codes = [c for c in lit.Test.ResultCode.all_codes()
+                     if not c.isFailure]
+    for code in success_codes:
+        format_group.add_argument(
+            "--show-{}".format(code.name.lower()),
+            dest="shown_codes",
+            help="Show {} tests ({})".format(code.label.lower(), code.name),
+            action="append_const",
+            const=code,
+            default=[])
 
     execution_group = parser.add_argument_group("Test Execution")
     execution_group.add_argument("--path",
@@ -187,12 +193,6 @@
     else:
         opts.shard = None
 
-    opts.show_results = set()
-    if opts.show_unsupported:
-        opts.show_results.add(lit.Test.UNSUPPORTED)
-    if opts.show_xfail:
-        opts.show_results.add(lit.Test.XFAIL)
-
     opts.reports = filter(None, [opts.output, opts.xunit_xml_output])
 
     return opts
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -265,15 +265,15 @@
         tests_by_code[test.result.code].append(test)
 
     for code in lit.Test.ResultCode.all_codes():
-        print_group(tests_by_code[code], code, opts.show_results)
+        print_group(tests_by_code[code], code, opts.shown_codes)
 
     print_summary(tests_by_code, opts.quiet, elapsed)
 
 
-def print_group(tests, code, show_results):
+def print_group(tests, code, shown_codes):
     if not tests:
         return
-    if not code.isFailure and code not in show_results:
+    if not code.isFailure and code not in shown_codes:
         return
     print('*' * 20)
     print('{} Tests ({}):'.format(code.label, len(tests)))
diff --git a/llvm/utils/lit/tests/Inputs/show-result-codes/fail.txt b/llvm/utils/lit/tests/Inputs/show-result-codes/fail.txt
new file mode 100644
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/show-result-codes/fail.txt
@@ -0,0 +1 @@
+RUN: false
diff --git a/llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg b/llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
new file mode 100644
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
@@ -0,0 +1,6 @@
+import lit.formats
+config.name = 'show-result-codes'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None
diff --git a/llvm/utils/lit/tests/Inputs/show-result-codes/pass.txt b/llvm/utils/lit/tests/Inputs/show-result-codes/pass.txt
new file mode 100644
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/show-result-codes/pass.txt
@@ -0,0 +1 @@
+RUN: true
diff --git a/llvm/utils/lit/tests/Inputs/show-result-codes/unsupported.txt b/llvm/utils/lit/tests/Inputs/show-result-codes/unsupported.txt
new file mode 100644
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/show-result-codes/unsupported.txt
@@ -0,0 +1,2 @@
+REQUIRES: missing-feature
+RUN: true
diff --git a/llvm/utils/lit/tests/Inputs/show-result-codes/xfail.txt b/llvm/utils/lit/tests/Inputs/show-result-codes/xfail.txt
new file mode 100644
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/show-result-codes/xfail.txt
@@ -0,0 +1,2 @@
+XFAIL: *
+RUN: false
diff --git a/llvm/utils/lit/tests/show-result-codes.py b/llvm/utils/lit/tests/show-result-codes.py
new file mode 100644
--- /dev/null
+++ b/llvm/utils/lit/tests/show-result-codes.py
@@ -0,0 +1,21 @@
+# Test the --show-{pass,unsupported,xfail,...} options.
+#
+# RUN: not %{lit} %{inputs}/show-result-codes | FileCheck %s --check-prefix=NONE
+# RUN: not %{lit} %{inputs}/show-result-codes --show-unsupported | FileCheck %s --check-prefix=ONE
+# RUN: not %{lit} %{inputs}/show-result-codes --show-pass --show-xfail | FileCheck %s --check-prefix=MULTIPLE
+
+# Failing tests are always shown
+# NONE-NOT: Unsupported Tests (1)
+# NONE-NOT: Passed Tests (1)
+# NONE-NOT: Expectedly Failed Tests (1)
+# NONE: Failed Tests (1)
+
+# ONE: Unsupported Tests (1)
+# ONE-NOT: Passed Tests (1)
+# ONE-NOT: Expectedly Failed Tests (1)
+# ONE: Failed Tests (1)
+
+# MULTIPLE-NOT: Unsupported Tests (1)
+# MULTIPLE: Passed Tests (1)
+# MULTIPLE: Expectedly Failed Tests (1)
+# MULTIPLE: Failed Tests (1)
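Note (not part of the patch): the cl_arguments.py hunk above leans on argparse's
"append_const" action so that every generated --show-<code> flag appends its
ResultCode constant into the shared opts.shown_codes list, which print_group then
consults. Below is a minimal standalone sketch of that pattern; the Code namedtuple
is a hypothetical stand-in for lit.Test.ResultCode, used only to keep the example
self-contained.

    import argparse
    from collections import namedtuple

    # Stand-in for lit.Test.ResultCode (name, human-readable label, failure flag).
    Code = namedtuple("Code", ["name", "label", "isFailure"])
    PASS = Code("PASS", "Passed", False)
    XFAIL = Code("XFAIL", "Expectedly Failed", False)
    UNSUPPORTED = Code("UNSUPPORTED", "Unsupported", False)

    parser = argparse.ArgumentParser()
    for code in (PASS, XFAIL, UNSUPPORTED):
        parser.add_argument(
            "--show-{}".format(code.name.lower()),
            dest="shown_codes",            # all flags share one destination list
            help="Show {} tests ({})".format(code.label.lower(), code.name),
            action="append_const",         # append `const` each time the flag is given
            const=code,
            default=[])

    opts = parser.parse_args(["--show-pass", "--show-xfail"])
    print([c.name for c in opts.shown_codes])  # -> ['PASS', 'XFAIL']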