Index: test/dosep.py
===================================================================
--- test/dosep.py
+++ test/dosep.py
@@ -20,7 +20,8 @@
 E.g., export LLDB_TEST_TIMEOUT=0
 or export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
 
-To collect core files for timed out tests, do the following before running dosep.py
+To collect core files for timed-out tests,
+do the following before running dosep.py
 
 OSX
 ulimit -c unlimited
@@ -43,6 +44,7 @@
 
 from optparse import OptionParser
 
+
 def get_timeout_command():
     """Search for a suitable timeout command."""
     if not sys.platform.startswith("win32"):
@@ -70,6 +72,7 @@
 dotest_options = None
 output_on_success = False
 
+
 def setup_global_variables(lock, counter, total, name_len, options):
     global output_lock, test_counter, total_tests, test_name_len
     global dotest_options
@@ -79,6 +82,7 @@
     test_name_len = name_len
     dotest_options = options
 
+
 def report_test_failure(name, command, output):
     global output_lock
     with output_lock:
@@ -88,6 +92,7 @@
         print >> sys.stderr, "Command invoked: %s" % ' '.join(command)
         update_progress(name)
 
+
 def report_test_pass(name, output):
     global output_lock, output_on_success
     with output_lock:
@@ -97,6 +102,7 @@
             print >> sys.stderr, "[%s PASSED]" % name
         update_progress(name)
 
+
 def update_progress(test_name=""):
     global output_lock, test_counter, total_tests, test_name_len
     with output_lock:
@@ -111,50 +117,60 @@
         sys.stdout.flush()
         sys.stderr.flush()
 
+
 def parse_test_results(output):
     passes = 0
     failures = 0
     for result in output:
-        pass_count = re.search("^RESULT:.*([0-9]+) passes", result, re.MULTILINE)
-        fail_count = re.search("^RESULT:.*([0-9]+) failures", result, re.MULTILINE)
-        error_count = re.search("^RESULT:.*([0-9]+) errors", result, re.MULTILINE)
+        pass_count = re.search("^RESULT:.*?([0-9]+) passes",
+                               result, re.MULTILINE)
+        fail_count = re.search("^RESULT:.*?([0-9]+) failures",
+                               result, re.MULTILINE)
+        error_count = re.search("^RESULT:.*?([0-9]+) errors",
+                                result, re.MULTILINE)
         this_fail_count = 0
         this_error_count = 0
-        if pass_count != None:
+        if pass_count is not None:
             passes = passes + int(pass_count.group(1))
-        if fail_count != None:
+        if fail_count is not None:
             failures = failures + int(fail_count.group(1))
-        if error_count != None:
+        if error_count is not None:
             failures = failures + int(error_count.group(1))
         pass
     return passes, failures
 
+
 def call_with_timeout(command, timeout, name):
     """Run command with a timeout if possible."""
     """-s QUIT will create a coredump if they are enabled on your system"""
     process = None
     if timeout_command and timeout != "0":
         command = [timeout_command, '-s', 'QUIT', timeout] + command
-    # Specifying a value for close_fds is unsupported on Windows when using subprocess.PIPE
+    # Specifying a value for close_fds is unsupported on Windows when using
+    # subprocess.PIPE
    if os.name != "nt":
-        process = subprocess.Popen(command, stdin=subprocess.PIPE,
-                                   stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE,
-                                   close_fds=True)
+        process = subprocess.Popen(command,
+                                   stdin=subprocess.PIPE,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   close_fds=True)
     else:
-        process = subprocess.Popen(command, stdin=subprocess.PIPE,
-                                   stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE)
+        process = subprocess.Popen(command,
+                                   stdin=subprocess.PIPE,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE)
     output = process.communicate()
     exit_status = process.returncode
     passes, failures = parse_test_results(output)
     if exit_status == 0:
-        # stdout does not have any useful information from 'dotest.py', only stderr does.
+        # stdout does not have any useful information from 'dotest.py',
+        # only stderr does.
         report_test_pass(name, output[1])
     else:
         report_test_failure(name, command, output[1])
     return exit_status, passes, failures
 
+
 def process_dir(root, files, test_root, dotest_argv):
     """Examine a directory for tests, and invoke any found within it."""
     timed_out = []
@@ -165,7 +181,8 @@
     for name in files:
         path = os.path.join(root, name)
 
-        # We're only interested in the test file with the "Test*.py" naming pattern.
+        # We're only interested in the test file with the "Test*.py" naming
+        # pattern.
         if not name.startswith("Test") or not name.endswith(".py"):
             continue
 
@@ -180,9 +197,11 @@
 
         timeout_name = os.path.basename(os.path.splitext(name)[0]).upper()
 
-        timeout = os.getenv("LLDB_%s_TIMEOUT" % timeout_name) or getDefaultTimeout(dotest_options.lldb_platform_name)
+        timeout = (os.getenv("LLDB_%s_TIMEOUT" % timeout_name) or
+                   getDefaultTimeout(dotest_options.lldb_platform_name))
 
-        exit_status, pass_count, fail_count = call_with_timeout(command, timeout, name)
+        exit_status, pass_count, fail_count = call_with_timeout(
+            command, timeout, name)
 
         pass_sub_count = pass_sub_count + pass_count
         fail_sub_count = fail_sub_count + fail_count
@@ -198,12 +217,14 @@
 in_q = None
 out_q = None
 
+
 def process_dir_worker(arg_tuple):
     """Worker thread main loop when in multithreaded mode.
     Takes one directory specification at a time and works on it."""
     (root, files, test_root, dotest_argv) = arg_tuple
     return process_dir(root, files, test_root, dotest_argv)
 
+
 def walk_and_invoke(test_directory, test_subdir, dotest_argv, num_threads):
     """Look for matched files and invoke test driver on each one.
     In single-threaded mode, each test driver is invoked directly.
@@ -211,7 +232,8 @@
     queue, and then wait for all to complete.
 
     test_directory - lldb/test/ directory
-    test_subdir - lldb/test/ or a subfolder with the tests we're interested in running
+    test_subdir - lldb/test/ or a subfolder with the tests we're interested in
+                  running
     """
 
     # Collect the test files that we'll run.
@@ -249,7 +271,10 @@
     pass_sub_count = 0
 
     for test_result in test_results:
-        (dir_timed_out, dir_failed, dir_passed, dir_fail_sub_count, dir_pass_sub_count) = test_result
+        (dir_timed_out,
+         dir_failed, dir_passed,
+         dir_fail_sub_count,
+         dir_pass_sub_count) = test_result
         timed_out += dir_timed_out
         failed += dir_failed
         passed += dir_passed
@@ -258,6 +283,7 @@
 
     return (timed_out, failed, passed, fail_sub_count, pass_sub_count)
 
+
 def getExpectedTimeouts(platform_name):
     # returns a set of test filenames that might timeout
     # are we running against a remote target?
@@ -280,9 +306,11 @@
             "TestCreateAfterAttach.py",
             "TestEvents.py",
             "TestExitDuringStep.py",
-            "TestHelloWorld.py", # Times out in ~10% of the times on the build bot
+
+            # Times out in ~10% of runs on the build bot
+            "TestHelloWorld.py",
             "TestMultithreaded.py",
-            "TestRegisters.py", # ~12/600 dosep runs (build 3120-3122)
+            "TestRegisters.py",  # ~12/600 dosep runs (build 3120-3122)
             "TestThreadStepOut.py",
         }
     elif target.startswith("android"):
@@ -299,10 +327,12 @@
         }
     elif target.startswith("darwin"):
         expected_timeout |= {
-            "TestThreadSpecificBreakpoint.py", # times out on MBP Retina, Mid 2012
+            # Times out on MBP Retina, Mid 2012
+            "TestThreadSpecificBreakpoint.py",
         }
     return expected_timeout
 
+
 def getDefaultTimeout(platform_name):
     if os.getenv("LLDB_TEST_TIMEOUT"):
         return os.getenv("LLDB_TEST_TIMEOUT")
@@ -320,6 +350,7 @@
     with open(fname, 'a'):
         os.utime(fname, times)
 
+
 def find(pattern, path):
     result = []
     for root, dirs, files in os.walk(path):
@@ -328,6 +359,7 @@
                 result.append(os.path.join(root, name))
     return result
 
+
 def main():
     # We can't use sys.path[0] to determine the script directory
     # because it doesn't work under a debugger
@@ -352,27 +384,32 @@
 E.g., export LLDB_TEST_TIMEOUT=0
 or export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
 """)
-    parser.add_option('-o', '--options',
-                      type='string', action='store',
-                      dest='dotest_options',
-                      help="""The options passed to 'dotest.py' if specified.""")
-
-    parser.add_option('-s', '--output-on-success',
-                      action='store_true',
-                      dest='output_on_success',
-                      default=False,
-                      help="""Print full output of 'dotest.py' even when it succeeds.""")
-
-    parser.add_option('-t', '--threads',
-                      type='int',
-                      dest='num_threads',
-                      help="""The number of threads to use when running tests separately.""")
+    parser.add_option(
+        '-o', '--options',
+        type='string', action='store',
+        dest='dotest_options',
+        help="""The options passed to 'dotest.py' if specified.""")
+
+    parser.add_option(
+        '-s', '--output-on-success',
+        action='store_true',
+        dest='output_on_success',
+        default=False,
+        help="""Print full output of 'dotest.py' even when it succeeds.""")
+
+    parser.add_option(
+        '-t', '--threads',
+        type='int',
+        dest='num_threads',
+        help="""The number of threads to use when running tests separately.""")
 
     opts, args = parser.parse_args()
     dotest_option_string = opts.dotest_options
 
     is_posix = (os.name == "posix")
-    dotest_argv = shlex.split(dotest_option_string, posix=is_posix) if dotest_option_string else []
+    dotest_argv = (shlex.split(dotest_option_string, posix=is_posix)
+                   if dotest_option_string
+                   else [])
 
     parser = dotest_args.create_parser()
     global dotest_options
@@ -385,7 +422,8 @@
     # every dotest invocation from creating its own directory
     import datetime
     # The windows platforms don't like ':' in the pathname.
-    timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
+    timestamp_started = datetime.datetime.now().strftime(
+        "%Y-%m-%d-%H_%M_%S")
     dotest_argv.append('-s')
     dotest_argv.append(timestamp_started)
     dotest_options.s = timestamp_started
@@ -415,7 +453,8 @@
         num_threads = 1
 
     system_info = " ".join(platform.uname())
-    (timed_out, failed, passed, all_fails, all_passes) = walk_and_invoke(test_directory, test_subdir, dotest_argv, num_threads)
+    (timed_out, failed, passed, all_fails, all_passes) = walk_and_invoke(
+        test_directory, test_subdir, dotest_argv, num_threads)
     timed_out = set(timed_out)
     num_test_files = len(failed) + len(passed)
 
@@ -445,10 +484,16 @@
             touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))
 
     print
-    print "Ran %d test suites (%d failed) (%f%%)" % (num_test_files, len(failed),
-            (100.0 * len(failed) / num_test_files) if num_test_files > 0 else float('NaN'))
-    print "Ran %d test cases (%d failed) (%f%%)" % (num_tests, all_fails,
-            (100.0 * all_fails / num_tests) if num_tests > 0 else float('NaN'))
+    print "Ran %d test suites (%d failed) (%f%%)" % (
+        num_test_files, len(failed),
+        (100.0 * len(failed) / num_test_files)
+        if num_test_files > 0
+        else float('NaN'))
+    print "Ran %d test cases (%d failed) (%f%%)" % (
+        num_tests, all_fails,
+        (100.0 * all_fails / num_tests)
+        if num_tests > 0
+        else float('NaN'))
     if len(failed) > 0:
         failed.sort()
         print "Failing Tests (%d)" % len(failed)
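
A note on the RESULT-line parsing in parse_test_results(): the non-greedy
".*?" is what lets multi-digit counts survive. With a greedy ".*", the regex
engine backtracks only far enough for "[0-9]+" to match a single digit, so
group(1) captures just the last digit of the count. A minimal, self-contained
sketch; the sample RESULT line below is invented for illustration and may not
match real dotest.py output exactly:

    import re

    sample = "RESULT: PASSED (128 passes, 0 failures, 0 errors)"

    # Greedy ".*" backtracks minimally, so "[0-9]+" gets only one digit.
    greedy = re.search("^RESULT:.*([0-9]+) passes", sample, re.MULTILINE)
    # Non-greedy ".*?" expands minimally, so "[0-9]+" takes the whole count.
    lazy = re.search("^RESULT:.*?([0-9]+) passes", sample, re.MULTILINE)

    print(greedy.group(1))  # prints "8"
    print(lazy.group(1))    # prints "128"

On the timestamp format: "%Y-%m-%d" is kept spelled out (wrapped for line
length) rather than shortened to "%F", since "%F" is a C99/glibc extension
that the Windows CRT's strftime does not support, and dosep.py also runs
where os.name == "nt".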