diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -37,6 +37,7 @@
 UNSUPPORTED = ResultCode('UNSUPPORTED', False)
 TIMEOUT     = ResultCode('TIMEOUT', True)
 SKIPPED     = ResultCode('SKIPPED', False)
+FILTERED    = ResultCode('FILTERED', False)
 
 
 # Test metric values.
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -57,6 +57,8 @@
                         opts.maxIndividualTestTime))
             lit_config.maxIndividualTestTime = opts.maxIndividualTestTime
 
+    determine_order(discovered_tests, opts.order)
+
     filtered_tests = [t for t in discovered_tests if
                       opts.filter.search(t.getFullName())]
     if not filtered_tests:
@@ -71,8 +73,6 @@
                              'error.\n')
             sys.exit(2)
 
-    determine_order(filtered_tests, opts.order)
-
     if opts.shard:
         (run, shards) = opts.shard
         filtered_tests = filter_by_shard(filtered_tests, run, shards, lit_config)
@@ -83,6 +83,8 @@
         filtered_tests = filtered_tests[:opts.max_tests]
 
+    mark_filtered(discovered_tests, filtered_tests)
+
     opts.workers = min(len(filtered_tests), opts.workers)
 
     start = time.time()
@@ -96,7 +98,7 @@
     if opts.time_tests:
         print_histogram(executed_tests)
 
-    print_results(filtered_tests, elapsed, opts)
+    print_results(discovered_tests, elapsed, opts)
 
     if opts.output_path:
         #TODO(yln): pass in discovered_tests
@@ -111,7 +113,7 @@
     if lit_config.numWarnings:
         sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)
 
-    has_failure = any(t.isFailure() for t in executed_tests)
+    has_failure = any(t.isFailure() for t in discovered_tests)
     if has_failure:
         sys.exit(1)
@@ -189,6 +191,13 @@
     return selected_tests
 
 
+def mark_filtered(discovered_tests, filtered_tests):
+    filtered_out = set(discovered_tests) - set(filtered_tests)
+    result = lit.Test.Result(lit.Test.FILTERED)
+    for t in filtered_out:
+        t.setResult(result)
+
+
 def run_tests(tests, lit_config, opts, discovered_tests):
     display = lit.display.create_display(opts, len(tests), discovered_tests,
                                          opts.workers)
@@ -261,6 +270,7 @@
     ]
 
     all_codes = [
+        (lit.Test.FILTERED, 'Filtered Tests', 'Filtered'),
         (lit.Test.SKIPPED, 'Skipped Tests', 'Skipped'),
         (lit.Test.UNSUPPORTED, 'Unsupported Tests', 'Unsupported'),
         (lit.Test.PASS, 'Expected Passes', ''),
@@ -284,7 +294,7 @@
     if not tests:
         return
     # TODO(yln): FLAKYPASS? Make this more consistent!
-    if code in {lit.Test.SKIPPED, lit.Test.PASS}:
+    if code in {lit.Test.FILTERED, lit.Test.SKIPPED, lit.Test.PASS}:
         return
     if (lit.Test.XFAIL == code and not opts.show_xfail) or \
        (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
diff --git a/llvm/utils/lit/tests/selecting.py b/llvm/utils/lit/tests/selecting.py
--- a/llvm/utils/lit/tests/selecting.py
+++ b/llvm/utils/lit/tests/selecting.py
@@ -22,12 +22,14 @@
 # RUN: %{lit} --filter 'O[A-Z]E' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # CHECK-FILTER: Testing: 2 of 5 tests
+# CHECK-FILTER: Filtered Tests : 3
 
 
 # Check that maximum counts work
 #
 # RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
 # CHECK-MAX: Testing: 3 of 5 tests
+# CHECK-MAX: Filtered Tests : 2
 
 
 # Check that sharding partitions the testsuite in a way that distributes the
@@ -38,6 +40,7 @@
 # RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
 # CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
 # CHECK-SHARD0-OUT: Testing: 2 of 5 tests
+# CHECK-SHARD0-OUT: Filtered Tests : 3
 #
 # RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
 # RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s
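For illustration, a minimal runnable sketch of the semantics the patch introduces, using hypothetical stand-in classes (SimpleTest, SimpleResult) rather than lit's actual Test and Result API: every discovered test that is deselected by --filter, --max-tests, or sharding is stamped with a FILTERED result, so the summary and exit code can be computed over all discovered tests instead of only the executed subset.

FILTERED = 'FILTERED'  # stand-in for lit.Test.FILTERED

class SimpleResult:
    """Hypothetical stand-in for lit.Test.Result."""
    def __init__(self, code):
        self.code = code

class SimpleTest:
    """Hypothetical stand-in for lit.Test.Test."""
    def __init__(self, name):
        self.name = name
        self.result = None

    def setResult(self, result):
        self.result = result

def mark_filtered(discovered_tests, filtered_tests):
    # Same logic as the helper added to main.py: every test that was
    # discovered but not selected for execution gets the FILTERED result.
    filtered_out = set(discovered_tests) - set(filtered_tests)
    result = SimpleResult(FILTERED)
    for t in filtered_out:
        t.setResult(result)

discovered = [SimpleTest(n) for n in ('t1', 't2', 't3', 't4', 't5')]
selected = discovered[:2]  # e.g. what survives --filter or --max-tests
mark_filtered(discovered, selected)
filtered_count = sum(1 for t in discovered
                     if t.result is not None and t.result.code == FILTERED)
print('Filtered Tests : %d' % filtered_count)  # prints: Filtered Tests : 3

Note that, as in the patch, a single result object is shared by all filtered-out tests; this is cheap because the FILTERED result carries no per-test data.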