Index: llvm/utils/lit/lit/cl_arguments.py
===================================================================
--- llvm/utils/lit/lit/cl_arguments.py
+++ llvm/utils/lit/lit/cl_arguments.py
@@ -8,11 +8,6 @@
 
 import lit.util
 
-class TestOrder(enum.Enum):
-    DEFAULT = enum.auto()
-    RANDOM = enum.auto()
-
-
 def parse_args():
     parser = argparse.ArgumentParser(prog='lit')
     parser.add_argument('test_paths',
@@ -150,11 +145,17 @@
             metavar="N",
             help="Maximum time to spend testing (in seconds)",
             type=_positive_int)
+    selection_group.add_argument("--order",
+            choices=["lexical", "random", "smart"],
+            default="smart",
+            help="Test order to use (default: smart)")
     selection_group.add_argument("--shuffle",
-            help="Run tests in random order",
-            action="store_true")
+            dest="order",
+            help="Run tests in random order (DEPRECATED: use --order=random)",
+            action="store_const",
+            const="random")
     selection_group.add_argument("-i", "--incremental",
-            help="Run failed tests first (DEPRECATED: now always enabled)",
+            help="Run failed tests first (DEPRECATED: use --order=smart)",
             action="store_true")
     selection_group.add_argument("--filter",
             metavar="REGEX",
@@ -215,11 +216,6 @@
     if opts.incremental:
         print('WARNING: --incremental is deprecated. Failing tests now always run first.')
 
-    if opts.shuffle:
-        opts.order = TestOrder.RANDOM
-    else:
-        opts.order = TestOrder.DEFAULT
-
     if opts.numShards or opts.runShard:
         if not opts.numShards or not opts.runShard:
            parser.error("--num-shards and --run-shard must be used together")
Index: llvm/utils/lit/lit/main.py
===================================================================
--- llvm/utils/lit/lit/main.py
+++ llvm/utils/lit/lit/main.py
@@ -165,12 +165,13 @@
 
 
 def determine_order(tests, order):
-    from lit.cl_arguments import TestOrder
-    if order == TestOrder.RANDOM:
+    if order == "random":
         import random
         random.shuffle(tests)
+    elif order == "lexical":
+        tests.sort(key=lambda t: t.getFullName())
     else:
-        assert order == TestOrder.DEFAULT, 'Unknown TestOrder value'
+        assert order == "smart", 'Unknown test order value'
         tests.sort(key=lambda t: (not t.previous_failure,
                                   -t.previous_elapsed,
                                   t.getFullName()))
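[Editor's note] A quick illustration of what the three `--order` modes do, mirroring `determine_order` above. This is a standalone sketch, not lit's code: `FakeTest` stands in for lit's real `Test` objects, whose `previous_failure`/`previous_elapsed` fields are, as I understand it, loaded from each suite's `.lit_test_times.txt` bookkeeping file.

```python
# Illustrative sketch of the three --order modes (not lit's actual code).
# FakeTest stands in for lit's Test class.
import random
from collections import namedtuple

FakeTest = namedtuple("FakeTest", "name previous_failure previous_elapsed")
tests = [
    FakeTest("suite :: slow.txt", False, 9.0),
    FakeTest("suite :: fast.txt", False, 0.1),
    FakeTest("suite :: flaky.txt", True, 1.5),
]

def determine_order(tests, order):
    if order == "random":
        random.shuffle(tests)
    elif order == "lexical":
        tests.sort(key=lambda t: t.name)
    else:  # "smart": previously failing tests first, then slowest first
        assert order == "smart"
        tests.sort(key=lambda t: (not t.previous_failure,
                                  -t.previous_elapsed,
                                  t.name))

determine_order(tests, "smart")
print([t.name for t in tests])
# -> ['suite :: flaky.txt', 'suite :: slow.txt', 'suite :: fast.txt']
```

The "smart" default is also why the `FIXME: this test depends on order of tests` workarounds below (deleting `.lit_test_times.txt` before each run) can be dropped: the lit-under-test is now pinned to `--order=lexical` instead.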
Index: llvm/utils/lit/tests/allow-retries.py
===================================================================
--- llvm/utils/lit/tests/allow-retries.py
+++ llvm/utils/lit/tests/allow-retries.py
@@ -4,32 +4,32 @@
 # only succeed the fourth time it is retried.
 #
 # RUN: rm -f %t.counter
-# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
+# RUN: %{lit} %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
 # CHECK-TEST1: Passed With Retry: 1
 
 # Test that a per-file ALLOW_RETRIES overwrites the config-wide test_retry_attempts property, if any.
 #
 # RUN: rm -f %t.counter
-# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
+# RUN: %{lit} %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
 # CHECK-TEST2: Passed With Retry: 1
 
 # This test does not succeed within the allowed retry limit
 #
-# RUN: not %{lit} -j 1 %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
+# RUN: not %{lit} %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
 # CHECK-TEST3: Failed Tests (1):
 # CHECK-TEST3: allow-retries :: does-not-succeed-within-limit.py
 
 # This test should be UNRESOLVED since it has more than one ALLOW_RETRIES
 # lines, and that is not allowed.
 #
-# RUN: not %{lit} -j 1 %{inputs}/allow-retries/more-than-one-allow-retries-lines.py | FileCheck --check-prefix=CHECK-TEST4 %s
+# RUN: not %{lit} %{inputs}/allow-retries/more-than-one-allow-retries-lines.py | FileCheck --check-prefix=CHECK-TEST4 %s
 # CHECK-TEST4: Unresolved Tests (1):
 # CHECK-TEST4: allow-retries :: more-than-one-allow-retries-lines.py
 
 # This test does not provide a valid integer to the ALLOW_RETRIES keyword.
 # It should be unresolved.
 #
-# RUN: not %{lit} -j 1 %{inputs}/allow-retries/not-a-valid-integer.py | FileCheck --check-prefix=CHECK-TEST5 %s
+# RUN: not %{lit} %{inputs}/allow-retries/not-a-valid-integer.py | FileCheck --check-prefix=CHECK-TEST5 %s
 # CHECK-TEST5: Unresolved Tests (1):
 # CHECK-TEST5: allow-retries :: not-a-valid-integer.py
 
@@ -37,5 +37,5 @@
 # when no ALLOW_RETRIES keyword is present.
 #
 # RUN: rm -f %t.counter
-# RUN: %{lit} -j 1 %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
+# RUN: %{lit} %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
 # CHECK-TEST6: Passed With Retry: 1
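[Editor's note] For context, the succeeds-within-limit input exercised above is, in spirit, a script like the following. This is my own sketch under assumptions, not the actual Inputs file: it fails until a counter file records enough attempts, so it can only pass through the retry mechanism.

```python
# ALLOW_RETRIES: 4
# RUN: "%{python}" %s %{counter}
#
# (%{counter} is assumed to come from the outer -Dcounter=%t.counter run.)
import sys

# Count attempts in the scratch file; fail until the fourth attempt.
counter_file = sys.argv[1]
try:
    with open(counter_file) as f:
        attempts = int(f.read())
except FileNotFoundError:
    attempts = 0
attempts += 1
with open(counter_file, "w") as f:
    f.write(str(attempts))
sys.exit(0 if attempts == 4 else 1)
```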
Index: llvm/utils/lit/tests/custom-result-category.py
===================================================================
--- llvm/utils/lit/tests/custom-result-category.py
+++ llvm/utils/lit/tests/custom-result-category.py
@@ -1,10 +1,7 @@
 # UNSUPPORTED: system-windows
 
 # Test lit.main.add_result_category() extension API.
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/custom-result-category/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 %{inputs}/custom-result-category | FileCheck %s
+# RUN: not %{lit} %{inputs}/custom-result-category | FileCheck %s
 
 # CHECK: CUSTOM_PASS: custom-result-category :: test1.txt
 # CHECK: CUSTOM_FAILURE: custom-result-category :: test2.txt
Index: llvm/utils/lit/tests/discovery.py
===================================================================
--- llvm/utils/lit/tests/discovery.py
+++ llvm/utils/lit/tests/discovery.py
@@ -1,7 +1,7 @@
 # Check the basic discovery process, including a sub-suite.
 #
 # RUN: %{lit} %{inputs}/discovery \
-# RUN:   -j 1 --debug --show-tests --show-suites \
+# RUN:   --debug --show-tests --show-suites \
 # RUN:   -v > %t.out 2> %t.err
 # RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
 # RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
@@ -56,7 +56,7 @@
 # RUN: %{lit} \
 # RUN:     %{inputs}/discovery/subdir/test-three.py \
 # RUN:     %{inputs}/discovery/subsuite/test-one.txt \
-# RUN:   -j 1 --show-tests --show-suites -v > %t.out
+# RUN:   --show-tests --show-suites -v > %t.out
 # RUN: FileCheck --check-prefix=CHECK-DIRECT-TEST < %t.out %s
 #
 # CHECK-DIRECT-TEST: -- Available Tests --
@@ -65,7 +65,7 @@
 
 # Check discovery when config files end in .py
 # RUN: %{lit} %{inputs}/py-config-discovery \
-# RUN:   -j 1 --debug --show-tests --show-suites \
+# RUN:   --debug --show-tests --show-suites \
 # RUN:   -v > %t.out 2> %t.err
 # RUN: FileCheck --check-prefix=CHECK-PYCONFIG-OUT < %t.out %s
 # RUN: FileCheck --check-prefix=CHECK-PYCONFIG-ERR < %t.err %s
@@ -95,7 +95,7 @@
 # Check discovery when using an exec path.
 #
 # RUN: %{lit} %{inputs}/exec-discovery \
-# RUN:   -j 1 --debug --show-tests --show-suites \
+# RUN:   --debug --show-tests --show-suites \
 # RUN:   -v > %t.out 2> %t.err
 # RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s
 # RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s
@@ -128,7 +128,7 @@
 #
 # RUN: %{lit} \
 # RUN:     %{inputs}/exec-discovery/subdir/test-three.py \
-# RUN:   -j 1 --show-tests --show-suites -v > %t.out
+# RUN:   --show-tests --show-suites -v > %t.out
 # RUN: FileCheck --check-prefix=CHECK-ASEXEC-DIRECT-TEST < %t.out %s
 #
 # CHECK-ASEXEC-DIRECT-TEST: -- Available Tests --
@@ -138,7 +138,7 @@
 # indirectly (e.g. when the directory containing the test is specified).
 #
 # RUN: not %{lit} \
-# RUN:     %{inputs}/discovery/test.not-txt -j 1 2>%t.err
+# RUN:     %{inputs}/discovery/test.not-txt 2>%t.err
 # RUN: FileCheck --check-prefix=CHECK-ERROR-INDIRECT-RUN-CHECK < %t.err %s
 #
 # CHECK-ERROR-INDIRECT-RUN-CHECK: error: 'top-level-suite :: test.not-txt' would not be run indirectly
@@ -146,11 +146,11 @@
 # Check that no error is emitted with --no-indirectly-run-check.
 #
 # RUN: %{lit} \
-# RUN:     %{inputs}/discovery/test.not-txt -j 1 --no-indirectly-run-check
+# RUN:     %{inputs}/discovery/test.not-txt --no-indirectly-run-check
 
 # Check that a standalone test with no suffixes set is run without any errors.
 #
-# RUN: %{lit} %{inputs}/standalone-tests/true.txt -j 1 > %t.out
+# RUN: %{lit} %{inputs}/standalone-tests/true.txt > %t.out
 # RUN: FileCheck --check-prefix=CHECK-STANDALONE < %t.out %s
 #
 # CHECK-STANDALONE: PASS: Standalone tests :: true.txt
@@ -158,7 +158,7 @@
 # Check that an error is produced if suffixes variable is set for a suite with
 # standalone tests.
 #
-# RUN: not %{lit} %{inputs}/standalone-tests-with-suffixes -j 1 2> %t.err
+# RUN: not %{lit} %{inputs}/standalone-tests-with-suffixes 2> %t.err
 # RUN: FileCheck --check-prefixes=CHECK-STANDALONE-SUFFIXES,CHECK-STANDALONE-DISCOVERY < %t.err %s
 #
 # CHECK-STANDALONE-SUFFIXES: standalone_tests set {{.*}} but suffixes
@@ -166,14 +166,14 @@
 # Check that an error is produced if excludes variable is set for a suite with
 # standalone tests.
 #
-# RUN: not %{lit} %{inputs}/standalone-tests-with-excludes -j 1 2> %t.err
+# RUN: not %{lit} %{inputs}/standalone-tests-with-excludes 2> %t.err
 # RUN: FileCheck --check-prefixes=CHECK-STANDALONE-EXCLUDES,CHECK-STANDALONE-DISCOVERY < %t.err %s
 #
 # CHECK-STANDALONE-EXCLUDES: standalone_tests set {{.*}} but {{.*}} excludes
 
 # Check that no discovery is done for testsuite with standalone tests.
 #
-# RUN: not %{lit} %{inputs}/standalone-tests -j 1 2>%t.err
+# RUN: not %{lit} %{inputs}/standalone-tests 2>%t.err
 # RUN: FileCheck --check-prefix=CHECK-STANDALONE-DISCOVERY < %t.err %s
 #
 # CHECK-STANDALONE-DISCOVERY: error: did not discover any tests for provided path(s)
@@ -183,14 +183,14 @@
 #
 # RUN: %{lit} \
 # RUN:     %{inputs}/exec-discovery-in-tree/obj/ \
-# RUN:   -j 1 --show-tests --show-suites -v > %t.out
+# RUN:   --show-tests --show-suites -v > %t.out
 # RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
 #
 # Try it again after cd'ing into the test suite using a short relative path.
 #
 # RUN: cd %{inputs}/exec-discovery-in-tree/obj/
 # RUN: %{lit} . \
-# RUN:   -j 1 --show-tests --show-suites -v > %t.out
+# RUN:   --show-tests --show-suites -v > %t.out
 # RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
 #
 # CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests
Index: llvm/utils/lit/tests/googletest-discovery-failed.py
===================================================================
--- llvm/utils/lit/tests/googletest-discovery-failed.py
+++ llvm/utils/lit/tests/googletest-discovery-failed.py
@@ -1,6 +1,6 @@
 # Check for correct error message when discovery of tests fails.
 #
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-discovery-failed > %t.cmd.out
+# RUN: not %{lit} -v %{inputs}/googletest-discovery-failed > %t.cmd.out
 # RUN: FileCheck < %t.cmd.out %s
 
 
Index: llvm/utils/lit/tests/googletest-format.py
===================================================================
--- llvm/utils/lit/tests/googletest-format.py
+++ llvm/utils/lit/tests/googletest-format.py
@@ -1,9 +1,6 @@
 # Check the various features of the GoogleTest format.
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/googletest-format/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-format > %t.out
+# RUN: not %{lit} -v %{inputs}/googletest-format > %t.out
 # FIXME: Temporarily dump test output so we can debug failing tests on
 # buildbots.
 # RUN: cat %t.out
Index: llvm/utils/lit/tests/googletest-timeout.py
===================================================================
--- llvm/utils/lit/tests/googletest-timeout.py
+++ llvm/utils/lit/tests/googletest-timeout.py
@@ -6,14 +6,14 @@
 
 # Check that the per test timeout is enforced when running GTest tests.
 #
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
+# RUN: not %{lit} -v %{inputs}/googletest-timeout \
 # RUN:   --filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
 # RUN: FileCheck --check-prefix=CHECK-INF < %t.cmd.out %s
 
 # Check that the per test timeout is enforced when running GTest tests via
 # the configuration file
 #
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
+# RUN: not %{lit} -v %{inputs}/googletest-timeout \
 # RUN:   --filter=InfiniteLoopSubTest --param set_timeout=1 \
 # RUN:   > %t.cfgset.out
 # RUN: FileCheck --check-prefix=CHECK-INF < %t.cfgset.out %s
@@ -34,7 +34,7 @@
 # 3600 second timeout.
 ###############################################################################
 
-# RUN: %{lit} -j 1 -v %{inputs}/googletest-timeout \
+# RUN: %{lit} -v %{inputs}/googletest-timeout \
 # RUN:   --filter=QuickSubTest --timeout=3600 > %t.cmd.out
 # RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmd.out %s
 
@@ -43,7 +43,7 @@
 
 # Test per test timeout via a config file and on the command line.
 # The value set on the command line should override the config file.
-# RUN: %{lit} -j 1 -v %{inputs}/googletest-timeout --filter=QuickSubTest \
+# RUN: %{lit} -v %{inputs}/googletest-timeout --filter=QuickSubTest \
 # RUN:   --param set_timeout=1 --timeout=3600 \
 # RUN:   > %t.cmdover.out 2> %t.cmdover.err
 # RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmdover.out %s
Index: llvm/utils/lit/tests/googletest-upstream-format.py
===================================================================
--- llvm/utils/lit/tests/googletest-upstream-format.py
+++ llvm/utils/lit/tests/googletest-upstream-format.py
@@ -1,9 +1,6 @@
 # Check the various features of the GoogleTest format.
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/googletest-upstream-format/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-upstream-format > %t.out
+# RUN: not %{lit} -v %{inputs}/googletest-upstream-format > %t.out
 # RUN: FileCheck < %t.out %s
 #
 # END.
Index: llvm/utils/lit/tests/ignore-fail.py
===================================================================
--- llvm/utils/lit/tests/ignore-fail.py
+++ llvm/utils/lit/tests/ignore-fail.py
@@ -1,8 +1,8 @@
 # Check that --ignore-fail produces exit status 0 despite various kinds of
 # test failures but doesn't otherwise suppress those failures.
 
-# RUN: not %{lit} -j 1 %{inputs}/ignore-fail | FileCheck %s
-# RUN: %{lit} -j 1 --ignore-fail %{inputs}/ignore-fail | FileCheck %s
+# RUN: not %{lit} %{inputs}/ignore-fail | FileCheck %s
+# RUN: %{lit} --ignore-fail %{inputs}/ignore-fail | FileCheck %s
 
 
 # END.
Index: llvm/utils/lit/tests/lit-opts.py
===================================================================
--- llvm/utils/lit/tests/lit-opts.py
+++ llvm/utils/lit/tests/lit-opts.py
@@ -1,25 +1,25 @@
 # Check cases where LIT_OPTS has no effect.
 #
-# RUN: %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
-# RUN: env LIT_OPTS= %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
-# RUN: env LIT_OPTS=-s %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
+# RUN: %{lit} -s %{inputs}/lit-opts | FileCheck %s
+# RUN: env LIT_OPTS= %{lit} -s %{inputs}/lit-opts | FileCheck %s
+# RUN: env LIT_OPTS=-s %{lit} -s %{inputs}/lit-opts | FileCheck %s
 
 # Check that LIT_OPTS can override command-line options.
 #
 # RUN: env LIT_OPTS=-a \
-# RUN:   %{lit} -j 1 -s %{inputs}/lit-opts \
+# RUN:   %{lit} -s %{inputs}/lit-opts \
 # RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR= %s
 
 # Check that LIT_OPTS understands multiple options with arbitrary spacing.
 #
 # RUN: env LIT_OPTS='-a -v -Dvar=foobar' \
-# RUN:   %{lit} -j 1 -s %{inputs}/lit-opts \
+# RUN:   %{lit} -s %{inputs}/lit-opts \
 # RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR=foobar %s
 
 # Check that LIT_OPTS parses shell-like quotes and escapes.
 #
 # RUN: env LIT_OPTS='-a -v -Dvar="foo bar"\ baz' \
-# RUN:   %{lit} -j 1 -s %{inputs}/lit-opts \
+# RUN:   %{lit} -s %{inputs}/lit-opts \
 # RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR="foo bar baz" %s
 
 
 # CHECK: Testing: 1 tests
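[Editor's note] The quoting rules exercised by lit-opts.py are ordinary shell-style splitting (lit splits LIT_OPTS with Python's `shlex`, to the best of my knowledge). A standalone check of the two interesting cases:

```python
# Standalone sketch: how shell-like splitting turns the LIT_OPTS strings
# above into argv entries.
import shlex

print(shlex.split("-a -v -Dvar=foobar"))
# ['-a', '-v', '-Dvar=foobar']

print(shlex.split('-a -v -Dvar="foo bar"\\ baz'))
# ['-a', '-v', '-Dvar=foo bar baz']  -> hence -DVAR="foo bar baz" above
```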
Index: llvm/utils/lit/tests/lit.cfg
===================================================================
--- llvm/utils/lit/tests/lit.cfg
+++ llvm/utils/lit/tests/lit.cfg
@@ -57,11 +57,14 @@
 # suites in %{inputs}. This test suite's results are then determined in part
 # by %{lit}'s textual output, which includes the output of FileCheck calls
 # within %{inputs}'s test suites. Thus, %{lit} clears environment variables
-# that can affect FileCheck's output.
+# that can affect FileCheck's output. It also includes "--order=lexical -j1"
+# to ensure predictable test order, as it is often required for FileCheck
+# matches.
 config.substitutions.append(('%{inputs}', os.path.join(
     config.test_source_root, 'Inputs')))
-config.substitutions.append(('%{lit}',
-    "{env} %{{python}} {lit}".format(
+config.substitutions.append(('%{lit}', '%{lit-no-order-opt} --order=lexical'))
+config.substitutions.append(('%{lit-no-order-opt}',
+    "{env} %{{python}} {lit} -j1".format(
         env="env -u FILECHECK_OPTS",
         lit=os.path.join(lit_path, 'lit.py'))))
 config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
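[Editor's note] To make the new layering concrete: `%{lit}` is now `%{lit-no-order-opt}` plus a forced `--order=lexical`, so a test opts out of the forced order by using `%{lit-no-order-opt}` directly (as reorder.py does below). A simplified, self-contained sketch of the expansion; `lit_path` is a placeholder and the plain loop glosses over lit's real substitution engine:

```python
# Simplified stand-in for lit's substitution expansion (illustrative only).
import os
import sys

lit_path = "/path/to/llvm/utils/lit"  # placeholder
subs = [
    ("%{lit}", "%{lit-no-order-opt} --order=lexical"),
    ("%{lit-no-order-opt}",
     "{env} %{{python}} {lit} -j1".format(
         env="env -u FILECHECK_OPTS",
         lit=os.path.join(lit_path, "lit.py"))),
    ("%{python}", '"%s"' % sys.executable),
]

line = "RUN: %{lit} %{inputs}/reorder"
for key, value in subs:  # one left-to-right pass suffices for this chain
    line = line.replace(key, value)
print(line)
# RUN: env -u FILECHECK_OPTS "<python>" .../lit.py -j1 --order=lexical %{inputs}/reorder
# (%{inputs} is expanded by a separate substitution in the real config.)
```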
Index: llvm/utils/lit/tests/max-failures.py
===================================================================
--- llvm/utils/lit/tests/max-failures.py
+++ llvm/utils/lit/tests/max-failures.py
@@ -2,10 +2,10 @@
 
 # Check the behavior of --max-failures option.
 #
-# RUN: not %{lit} -j 1 %{inputs}/max-failures > %t.out 2>&1
-# RUN: not %{lit} --max-failures=1 -j 1 %{inputs}/max-failures >> %t.out 2>&1
-# RUN: not %{lit} --max-failures=2 -j 1 %{inputs}/max-failures >> %t.out 2>&1
-# RUN: not %{lit} --max-failures=0 -j 1 %{inputs}/max-failures 2>> %t.out
+# RUN: not %{lit} %{inputs}/max-failures > %t.out 2>&1
+# RUN: not %{lit} --max-failures=1 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=2 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=0 %{inputs}/max-failures 2>> %t.out
 # RUN: FileCheck < %t.out %s
 #
 
Index: llvm/utils/lit/tests/progress-bar.py
===================================================================
--- llvm/utils/lit/tests/progress-bar.py
+++ llvm/utils/lit/tests/progress-bar.py
@@ -1,9 +1,6 @@
 # Check the simple progress bar.
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/progress-bar/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -s %{inputs}/progress-bar > %t.out
+# RUN: not %{lit} -s %{inputs}/progress-bar > %t.out
 # RUN: FileCheck < %t.out %s
 #
 # CHECK: Testing:
Index: llvm/utils/lit/tests/reorder.py
===================================================================
--- llvm/utils/lit/tests/reorder.py
+++ llvm/utils/lit/tests/reorder.py
@@ -1,7 +1,7 @@
 ## Check that we can reorder test runs.
 
 # RUN: cp %{inputs}/reorder/.lit_test_times.txt %{inputs}/reorder/.lit_test_times.txt.orig
-# RUN: %{lit} -j1 %{inputs}/reorder > %t.out
+# RUN: %{lit-no-order-opt} %{inputs}/reorder > %t.out
 # RUN: cp %{inputs}/reorder/.lit_test_times.txt %{inputs}/reorder/.lit_test_times.txt.new
 # RUN: cp %{inputs}/reorder/.lit_test_times.txt.orig %{inputs}/reorder/.lit_test_times.txt
 # RUN: not diff %{inputs}/reorder/.lit_test_times.txt.new %{inputs}/reorder/.lit_test_times.txt.orig
Index: llvm/utils/lit/tests/shtest-env.py
===================================================================
--- llvm/utils/lit/tests/shtest-env.py
+++ llvm/utils/lit/tests/shtest-env.py
@@ -1,9 +1,6 @@
 # Check the env command
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-env/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -a -v %{inputs}/shtest-env \
+# RUN: not %{lit} -a -v %{inputs}/shtest-env \
 # RUN: | FileCheck -match-full-lines %s
 #
 # END.
Index: llvm/utils/lit/tests/shtest-format-argv0.py
===================================================================
--- llvm/utils/lit/tests/shtest-format-argv0.py
+++ llvm/utils/lit/tests/shtest-format-argv0.py
@@ -7,7 +7,7 @@
 # and is not installed under PATH by default.
 # UNSUPPORTED: system-aix
 #
-# RUN: %{lit} -j 1 -v %{inputs}/shtest-format-argv0 | FileCheck %s
+# RUN: %{lit} -v %{inputs}/shtest-format-argv0 | FileCheck %s
 
 # CHECK: -- Testing:
 # CHECK: PASS: shtest-format-argv0 :: argv0.txt
Index: llvm/utils/lit/tests/shtest-format.py
===================================================================
--- llvm/utils/lit/tests/shtest-format.py
+++ llvm/utils/lit/tests/shtest-format.py
@@ -1,10 +1,7 @@
 # Check the various features of the ShTest format.
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-format/.lit_test_times.txt
-
 # RUN: rm -f %t.xml
-# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out
+# RUN: not %{lit} -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out
 # RUN: FileCheck < %t.out %s
 # RUN: FileCheck --check-prefix=XUNIT < %t.xml %s
 
Index: llvm/utils/lit/tests/shtest-inject.py
===================================================================
--- llvm/utils/lit/tests/shtest-inject.py
+++ llvm/utils/lit/tests/shtest-inject.py
@@ -1,6 +1,6 @@
 # Check that we can inject commands at the beginning of a ShTest.
 
-# RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-empty.txt --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
+# RUN: %{lit} %{inputs}/shtest-inject/test-empty.txt --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
 #
 # CHECK-TEST1: Script:
 # CHECK-TEST1: --
@@ -13,7 +13,7 @@
 #
 # CHECK-TEST1: Passed: 1
 
-# RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-one.txt --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
+# RUN: %{lit} %{inputs}/shtest-inject/test-one.txt --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
 #
 # CHECK-TEST2: Script:
 # CHECK-TEST2: --
@@ -28,7 +28,7 @@
 #
 # CHECK-TEST2: Passed: 1
 
-# RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-many.txt --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
+# RUN: %{lit} %{inputs}/shtest-inject/test-many.txt --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
 #
 # CHECK-TEST3: Script:
 # CHECK-TEST3: --
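[Editor's note] The injection tested by shtest-inject.py is driven by the input suite's config rather than the tests themselves. From memory, that config looks roughly like the sketch below; treat the exact `preamble_commands` strings (and the parameter name itself) as assumptions to verify against the real Inputs directory. As in any lit.cfg, `config` is provided implicitly by lit.

```python
# Sketch of an Inputs/shtest-inject-style lit.cfg (assumed, not copied).
import lit.formats

config.name = "shtest-inject"
config.suffixes = [".txt"]
# preamble_commands are prepended to every test's own RUN lines, which is
# why they appear at the top of the Script section printed by --show-all.
config.test_format = lit.formats.ShTest(
    preamble_commands=['echo "THIS WAS"', 'echo "INJECTED"'])
```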
Index: llvm/utils/lit/tests/shtest-keyword-parse-errors.py
===================================================================
--- llvm/utils/lit/tests/shtest-keyword-parse-errors.py
+++ llvm/utils/lit/tests/shtest-keyword-parse-errors.py
@@ -1,7 +1,4 @@
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-keyword-parse-errors/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-keyword-parse-errors > %t.out
+# RUN: not %{lit} -vv %{inputs}/shtest-keyword-parse-errors > %t.out
 # RUN: FileCheck -input-file %t.out %s
 #
 # END.
Index: llvm/utils/lit/tests/shtest-not.py
===================================================================
--- llvm/utils/lit/tests/shtest-not.py
+++ llvm/utils/lit/tests/shtest-not.py
@@ -1,9 +1,6 @@
 # Check the not command
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-not/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -a -v %{inputs}/shtest-not \
+# RUN: not %{lit} -a -v %{inputs}/shtest-not \
 # RUN: | FileCheck -match-full-lines %s
 #
 # END.
Index: llvm/utils/lit/tests/shtest-output-printing.py
===================================================================
--- llvm/utils/lit/tests/shtest-output-printing.py
+++ llvm/utils/lit/tests/shtest-output-printing.py
@@ -1,6 +1,6 @@
 # Check the various features of the ShTest format.
 #
-# RUN: not %{lit} -j 1 -v %{inputs}/shtest-output-printing > %t.out
+# RUN: not %{lit} -v %{inputs}/shtest-output-printing > %t.out
 # RUN: FileCheck --input-file %t.out %s
 #
 # END.
Index: llvm/utils/lit/tests/shtest-recursive-substitution.py
===================================================================
--- llvm/utils/lit/tests/shtest-recursive-substitution.py
+++ llvm/utils/lit/tests/shtest-recursive-substitution.py
@@ -1,27 +1,27 @@
 # Check that the config.recursiveExpansionLimit is picked up and will cause
 # lit substitutions to be expanded recursively.
 
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/substitutes-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/substitutes-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
 # CHECK-TEST1: PASS: substitutes-within-limit :: test.py
 # CHECK-TEST1: $ "echo" "STOP"
 
-# RUN: not %{lit} -j 1 %{inputs}/shtest-recursive-substitution/does-not-substitute-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
+# RUN: not %{lit} %{inputs}/shtest-recursive-substitution/does-not-substitute-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
 # CHECK-TEST2: UNRESOLVED: does-not-substitute-within-limit :: test.py
 # CHECK-TEST2: ValueError: Recursive substitution of
 
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/does-not-substitute-no-limit --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/does-not-substitute-no-limit --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
 # CHECK-TEST3: PASS: does-not-substitute-no-limit :: test.py
 # CHECK-TEST3: $ "echo" "%rec4"
 
-# RUN: not %{lit} -j 1 %{inputs}/shtest-recursive-substitution/not-an-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST4 %s
+# RUN: not %{lit} %{inputs}/shtest-recursive-substitution/not-an-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST4 %s
 # CHECK-TEST4: recursiveExpansionLimit must be either None or an integer
 
-# RUN: not %{lit} -j 1 %{inputs}/shtest-recursive-substitution/negative-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST5 %s
+# RUN: not %{lit} %{inputs}/shtest-recursive-substitution/negative-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST5 %s
 # CHECK-TEST5: recursiveExpansionLimit must be a non-negative integer
 
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/set-to-none --show-all | FileCheck --check-prefix=CHECK-TEST6 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/set-to-none --show-all | FileCheck --check-prefix=CHECK-TEST6 %s
 # CHECK-TEST6: PASS: set-to-none :: test.py
 
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/escaping --show-all | FileCheck --check-prefix=CHECK-TEST7 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/escaping --show-all | FileCheck --check-prefix=CHECK-TEST7 %s
 # CHECK-TEST7: PASS: escaping :: test.py
 # CHECK-TEST7: $ "echo" "%s" "%s" "%%s"
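[Editor's note] The recursive-substitution inputs are small per-directory configs plus a chain of `%recN` substitutions. The following is my reconstruction of their flavor, not the literal Inputs files; names beyond `%rec4`/`STOP` are inferred from the CHECK lines above.

```python
# Reconstructed flavor of a substitutes-within-limit-style lit.cfg
# (illustrative; the real Inputs files may differ in detail).
config.recursiveExpansionLimit = 10  # opt in to recursive expansion

# Ordered so that a single pass only advances the chain by one step.
config.substitutions.append(("%rec1", "STOP"))
config.substitutions.append(("%rec2", "%rec1"))
config.substitutions.append(("%rec3", "%rec2"))
config.substitutions.append(("%rec4", "%rec3"))

# One substitution pass turns "echo %rec4" into "echo %rec3"; with
# recursiveExpansionLimit set, lit keeps substituting until the line
# reaches a fixed point ("echo STOP") or the limit is exceeded, which
# is the ValueError that CHECK-TEST2 expects.
```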
Index: llvm/utils/lit/tests/shtest-run-at-line.py
===================================================================
--- llvm/utils/lit/tests/shtest-run-at-line.py
+++ llvm/utils/lit/tests/shtest-run-at-line.py
@@ -1,10 +1,7 @@
 # Check that -vv makes the line number of the failing RUN command clear.
 # (-v is actually sufficient in the case of the internal shell.)
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-run-at-line/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
+# RUN: not %{lit} -vv %{inputs}/shtest-run-at-line > %t.out
 # RUN: FileCheck --input-file %t.out %s
 #
 # END.
Index: llvm/utils/lit/tests/shtest-shell.py
===================================================================
--- llvm/utils/lit/tests/shtest-shell.py
+++ llvm/utils/lit/tests/shtest-shell.py
@@ -1,9 +1,6 @@
 # Check the internal shell handling component of the ShTest format.
 
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-shell/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -v %{inputs}/shtest-shell > %t.out
+# RUN: not %{lit} -v %{inputs}/shtest-shell > %t.out
 # FIXME: Temporarily dump test output so we can debug failing tests on
 # buildbots.
 # RUN: cat %t.out
@@ -11,9 +8,7 @@
 #
 # Test again in non-UTF shell to catch potential errors with python 2 seen
 # on stdout-encoding.txt
-# FIXME: lit's testing sets source_root == exec_root which complicates running lit more than once per test.
-# RUN: rm -f %{inputs}/shtest-shell/.lit_test_times.txt
-# RUN: env PYTHONIOENCODING=ascii not %{lit} -j 1 -a %{inputs}/shtest-shell > %t.ascii.out
+# RUN: env PYTHONIOENCODING=ascii not %{lit} -a %{inputs}/shtest-shell > %t.ascii.out
 # FIXME: Temporarily dump test output so we can debug failing tests on
 # buildbots.
 # RUN: cat %t.ascii.out
Index: llvm/utils/lit/tests/test-data-micro.py
===================================================================
--- llvm/utils/lit/tests/test-data-micro.py
+++ llvm/utils/lit/tests/test-data-micro.py
@@ -1,7 +1,7 @@
 # Test features related to formats which support reporting additional test data.
 # and multiple test results.
 
-# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro | FileCheck %s
+# RUN: %{lit} -v %{inputs}/test-data-micro | FileCheck %s
 
 # CHECK: -- Testing:
 
Index: llvm/utils/lit/tests/test-data.py
===================================================================
--- llvm/utils/lit/tests/test-data.py
+++ llvm/utils/lit/tests/test-data.py
@@ -1,6 +1,6 @@
 # Test features related to formats which support reporting additional test data.
 
-# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
+# RUN: %{lit} -v %{inputs}/test-data > %t.out
 # RUN: FileCheck < %t.out %s
 
 # CHECK: -- Testing:
Index: llvm/utils/lit/tests/test-output-micro.py
===================================================================
--- llvm/utils/lit/tests/test-output-micro.py
+++ llvm/utils/lit/tests/test-output-micro.py
@@ -1,4 +1,4 @@
-# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out
+# RUN: %{lit} -v %{inputs}/test-data-micro --output %t.results.out
 # RUN: FileCheck < %t.results.out %s
 # RUN: rm %t.results.out
 
Index: llvm/utils/lit/tests/test-output.py
===================================================================
--- llvm/utils/lit/tests/test-output.py
+++ llvm/utils/lit/tests/test-output.py
@@ -1,4 +1,4 @@
-# RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
+# RUN: %{lit} -v %{inputs}/test-data --output %t.results.out > %t.out
 # RUN: FileCheck < %t.results.out %s
 
 # CHECK: {
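[Editor's note] The `# CHECK: {` above works because `--output` writes a JSON report. A quick way to inspect one from Python; the file name is a placeholder, and the exact top-level keys are from memory, so verify them against your lit version:

```python
# Inspect a lit --output report; "results.out" is a placeholder path.
import json

with open("results.out") as f:
    results = json.load(f)

# The report is a JSON object; per-test entries live under "tests".
for test in results.get("tests", []):
    print(test["name"], test["code"])  # e.g. suite :: foo.txt PASS
```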