diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -37,6 +37,7 @@
 UNSUPPORTED = ResultCode('UNSUPPORTED', False)
 TIMEOUT = ResultCode('TIMEOUT', True)
 SKIPPED = ResultCode('SKIPPED', False)
+EXCLUDED = ResultCode('EXCLUDED', False)
 
 
 # Test metric values.
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -57,9 +57,11 @@
                         opts.maxIndividualTestTime))
         lit_config.maxIndividualTestTime = opts.maxIndividualTestTime
 
-    filtered_tests = [t for t in discovered_tests if
+    determine_order(discovered_tests, opts.order)
+
+    selected_tests = [t for t in discovered_tests if
                       opts.filter.search(t.getFullName())]
-    if not filtered_tests:
+    if not selected_tests:
         sys.stderr.write('error: filter did not match any tests '
                          '(of %d discovered). ' % len(discovered_tests))
         if opts.allow_empty_runs:
@@ -71,30 +73,30 @@
                              'error.\n')
             sys.exit(2)
 
-    determine_order(filtered_tests, opts.order)
-
     if opts.shard:
         (run, shards) = opts.shard
-        filtered_tests = filter_by_shard(filtered_tests, run, shards, lit_config)
-        if not filtered_tests:
+        selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
+        if not selected_tests:
             sys.stderr.write('warning: shard does not contain any tests. '
                              'Consider decreasing the number of shards.\n')
             sys.exit(0)
 
-    filtered_tests = filtered_tests[:opts.max_tests]
+    selected_tests = selected_tests[:opts.max_tests]
+
+    mark_excluded(discovered_tests, selected_tests)
 
     start = time.time()
-    run_tests(filtered_tests, lit_config, opts, len(discovered_tests))
+    run_tests(selected_tests, lit_config, opts, len(discovered_tests))
     elapsed = time.time() - start
 
     # TODO(yln): eventually, all functions below should act on discovered_tests
     executed_tests = [
-        t for t in filtered_tests if t.result.code != lit.Test.SKIPPED]
+        t for t in selected_tests if t.result.code != lit.Test.SKIPPED]
 
     if opts.time_tests:
         print_histogram(executed_tests)
 
-    print_results(filtered_tests, elapsed, opts)
+    print_results(discovered_tests, elapsed, opts)
 
     if opts.output_path:
         #TODO(yln): pass in discovered_tests
@@ -109,7 +111,7 @@
     if lit_config.numWarnings:
         sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)
 
-    has_failure = any(t.isFailure() for t in executed_tests)
+    has_failure = any(t.isFailure() for t in discovered_tests)
     if has_failure:
         sys.exit(1)
 
@@ -187,6 +189,13 @@
     return selected_tests
 
 
+def mark_excluded(discovered_tests, selected_tests):
+    excluded_tests = set(discovered_tests) - set(selected_tests)
+    result = lit.Test.Result(lit.Test.EXCLUDED)
+    for t in excluded_tests:
+        t.setResult(result)
+
+
 def run_tests(tests, lit_config, opts, discovered_tests):
     workers = min(len(tests), opts.workers)
     display = lit.display.create_display(opts, len(tests), discovered_tests,
@@ -261,6 +270,7 @@
     # Status code, summary label, group label
     result_codes = [
         # Passes
+        (lit.Test.EXCLUDED,    'Excluded Tests',    'Excluded'),
         (lit.Test.SKIPPED,     'Skipped Tests',     'Skipped'),
         (lit.Test.UNSUPPORTED, 'Unsupported Tests', 'Unsupported'),
         (lit.Test.PASS,        'Expected Passes',   ''),
@@ -289,7 +299,7 @@
     if not tests:
         return
     # TODO(yln): FLAKYPASS? Make this more consistent!
-    if code in {lit.Test.SKIPPED, lit.Test.PASS}:
+    if code in {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.PASS}:
         return
     if (lit.Test.XFAIL == code and not opts.show_xfail) or \
        (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
diff --git a/llvm/utils/lit/tests/selecting.py b/llvm/utils/lit/tests/selecting.py
--- a/llvm/utils/lit/tests/selecting.py
+++ b/llvm/utils/lit/tests/selecting.py
@@ -22,12 +22,14 @@
 # RUN: %{lit} --filter 'O[A-Z]E' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # CHECK-FILTER: Testing: 2 of 5 tests
+# CHECK-FILTER: Excluded Tests : 3
 
 
 # Check that maximum counts work
 #
 # RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
 # CHECK-MAX: Testing: 3 of 5 tests
+# CHECK-MAX: Excluded Tests : 2
 
 
 # Check that sharding partitions the testsuite in a way that distributes the
@@ -38,6 +40,7 @@
 # RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
 # CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
 # CHECK-SHARD0-OUT: Testing: 2 of 5 tests
+# CHECK-SHARD0-OUT: Excluded Tests : 3
 #
 # RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
 # RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s
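
Why this works: once mark_excluded has run, every discovered-but-not-selected
test carries an explicit EXCLUDED result, which is what lets print_results and
the final has_failure check operate on discovered_tests instead of the selected
subset. Below is a minimal, self-contained sketch of that flow; the stub
classes are simplified stand-ins, and only the names ResultCode, Result,
setResult, and mark_excluded mirror this patch.

    # Standalone sketch: how EXCLUDED lets the summary account for every
    # discovered test. Stub classes are simplified stand-ins for lit's.

    class ResultCode:
        def __init__(self, name, isFailure):
            self.name = name
            self.isFailure = isFailure

    class Result:
        def __init__(self, code):
            self.code = code

    class Test:
        def __init__(self, name):
            self.name = name
            self.result = None
        def setResult(self, result):
            self.result = result

    EXCLUDED = ResultCode('EXCLUDED', False)

    def mark_excluded(discovered_tests, selected_tests):
        # One shared Result object suffices: EXCLUDED carries no per-test
        # data and results are only read after this point.
        excluded_tests = set(discovered_tests) - set(selected_tests)
        result = Result(EXCLUDED)
        for t in excluded_tests:
            t.setResult(result)

    discovered = [Test(n) for n in ('t1', 't2', 't3', 't4', 't5')]
    selected = discovered[:3]          # e.g. the effect of --max-tests 3
    mark_excluded(discovered, selected)

    excluded = [t for t in discovered if t.result and t.result.code is EXCLUDED]
    print('Excluded Tests : %d' % len(excluded))  # prints: Excluded Tests : 2

Sharing a single Result instance across all excluded tests is safe here for
the same reason: the result is never mutated per test, only counted and
printed by the reporting code.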