diff --git a/compiler-rt/test/asan/TestCases/Darwin/asan_log_to_crashreporter.cpp b/compiler-rt/test/asan/TestCases/Darwin/asan_log_to_crashreporter.cpp
new file mode 100644
--- /dev/null
+++ b/compiler-rt/test/asan/TestCases/Darwin/asan_log_to_crashreporter.cpp
@@ -0,0 +1,29 @@
+// UNSUPPORTED: ios
+// We can reduce the scope of this test to check that we set the crash reporter
+// buffers correctly instead of reading from the crashlog.
+// For now, disable this test.
+// REQUIRES: rdar_74544282
+// REQUIRES: expensive
+// Check that ASan reports on OS X actually crash the process (abort_on_error=1)
+// and that the crash is logged via the crash reporter, with the ASan output in
+// the Application Specific Information section of the log.
+
+// RUN: %clangxx_asan %s -o %t
+
+// Crash hard so the crashlog is created.
+// RUN: %env_asan_opts=abort_on_error=1 not --crash %run %t > %t.process_output.txt 2>&1
+// RUN: %print_crashreport_for_pid --binary-filename=%basename_t.tmp \
+// RUN:   --pid=$(%get_pid_from_output --infile=%t.process_output.txt) \
+// RUN:   | FileCheck %s --check-prefixes CHECK-CRASHLOG
+
+#include <stdlib.h>
+int main() {
+  char *x = (char *)malloc(10 * sizeof(char));
+  free(x);
+  return x[5];
+  // Needs to crash hard so the crashlog exists...
+  // CHECK-CRASHLOG: {{.*Application Specific Information:}}
+  // CHECK-CRASHLOG-NEXT: {{=====}}
+  // CHECK-CRASHLOG-NEXT: {{.*ERROR: AddressSanitizer: heap-use-after-free on address}}
+  // CHECK-CRASHLOG: {{abort()}}
+}
diff --git a/compiler-rt/test/lit.common.cfg.py b/compiler-rt/test/lit.common.cfg.py
--- a/compiler-rt/test/lit.common.cfg.py
+++ b/compiler-rt/test/lit.common.cfg.py
@@ -12,6 +12,15 @@
 import lit.formats
 import lit.util
 
+# Get shlex.quote if available (added in Python 3.3), and fall back to
+# pipes.quote if it's not available.
+try:
+  import shlex
+  sh_quote = shlex.quote
+except:
+  import pipes
+  sh_quote = pipes.quote
+
 # Choose between lit's internal shell pipeline runner and a real shell. If
 # LIT_USE_INTERNAL_SHELL is in the environment, we use that as an override.
 use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL")
@@ -134,6 +143,9 @@
 
 emulator = get_lit_conf('emulator', None)
 
+def get_ios_commands_dir():
+  return os.path.join(config.compiler_rt_src_root, "test", "sanitizer_common", "ios_commands")
+
 # Allow tests to be executed on a simulator or remotely.
 if emulator:
   config.substitutions.append( ('%run', emulator) )
@@ -173,7 +185,7 @@
   if config.apple_platform != "ios" and config.apple_platform != "iossim":
     config.available_features.add(config.apple_platform)
 
-  ios_commands_dir = os.path.join(config.compiler_rt_src_root, "test", "sanitizer_common", "ios_commands")
+  ios_commands_dir = get_ios_commands_dir()
 
   run_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_run.py")
   env_wrapper = os.path.join(ios_commands_dir, ios_or_iossim + "_env.py")
@@ -591,3 +603,19 @@
 
   config.clang = " " + " ".join(run_wrapper + [config.compile_wrapper, config.clang]) + " "
   config.target_cflags = " " + " ".join(target_cflags + extra_cflags) + " "
+
+if config.host_os == 'Darwin':
+  config.substitutions.append((
+    "%get_pid_from_output",
+    "{} {}/get_pid_from_output.py".format(
+      sh_quote(config.python_executable),
+      sh_quote(get_ios_commands_dir())
+    ))
+  )
+  config.substitutions.append(
+    ("%print_crashreport_for_pid",
+    "{} {}/print_crashreport_for_pid.py".format(
+      sh_quote(config.python_executable),
+      sh_quote(get_ios_commands_dir())
+    ))
+  )
diff --git a/compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py b/compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py
new file mode 100644
--- /dev/null
+++ b/compiler-rt/test/sanitizer_common/ios_commands/get_pid_from_output.py
@@ -0,0 +1,36 @@
+"""
+Parses the id of the process that ran under ASan from the sanitizer output logs.
+"""
+import sys, argparse, re
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help='The sanitizer output to get the pid from')
+    parser.add_argument('--outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help='Where to write the result')
+    args = parser.parse_args()
+
+    pid = process_file(args.infile)
+    args.outfile.write(pid)
+    args.infile.close()
+    args.outfile.close()
+
+
+
+def process_file(infile):
+    # Check that the first line is just an `====` divider.
+    first_line_pattern = re.compile(r'=+')
+    assert first_line_pattern.match(infile.readline())
+
+    # Parse the pid out of the second line:
+    # `==PID==ERROR: SanitizerName: error-type on address...`
+    pid_pattern = re.compile(r'==([0-9]*)==ERROR:')
+    pid = pid_pattern.search(infile.readline()).group(1)
+
+    # Ignore the rest of the output.
+
+    assert pid and pid.isdigit()
+
+    return pid
+
+if __name__ == '__main__':
+    main()
diff --git a/compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py b/compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py
new file mode 100644
--- /dev/null
+++ b/compiler-rt/test/sanitizer_common/ios_commands/print_crashreport_for_pid.py
@@ -0,0 +1,58 @@
+"""
+Finds and prints the crash report associated with a specific (binary filename, process id) pair.
+Waits (max_wait_time / attempts_remaining) seconds between retries.
+By default, max_wait_time=5 and retry_count=10, which results in a total wait time of ~15s.
+Errors if the report cannot be found after `retry_count` retries.
+""" +import sys, os, argparse, re, glob, shutil, time + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--pid', type=str, required=True, help='The process id of the process that crashed') + parser.add_argument('--binary-filename', type=str, required=True, help='The name of the file that crashed') + parser.add_argument('--retry-count', type=int, nargs='?', default=10, help='The number of retries to make') + parser.add_argument('--max-wait-time', type=float, nargs='?', default=5.0, help='The max amount of seconds to wait between tries') + + parser.add_argument('--dir', nargs='?', type=str, default="~/Library/Logs/DiagnosticReports", help='The directory to look for the crash report') + parser.add_argument('--outfile', nargs='?', type=argparse.FileType('r'), default=sys.stdout, help='Where to write the result') + args = parser.parse_args() + + assert args.pid, "pid can't be empty" + assert args.binary_filename, "binary-filename can't be empty" + + os.chdir(os.path.expanduser(args.dir)) + output_report_with_retries(args.outfile, args.pid.strip(), args.binary_filename, args.retry_count, args.max_wait_time) + +def output_report_with_retries(outfile, pid, filename, attempts_remaining, max_wait_time): + report_name = find_report_in_cur_dir(pid, filename) + if report_name: + with open(report_name, "r") as f: + shutil.copyfileobj(f, outfile) + return + elif(attempts_remaining > 0): + # As the number of attempts remaining decreases, increase the number of seconds waited + # if the max wait time is 2s and there are 10 attempts remaining, wait .2 seconds. + # if the max wait time is 2s and there are 2 attempts remaining, wait 1 second. + time.sleep(max_wait_time / attempts_remaining) + output_report_with_retries(outfile, pid, filename, attempts_remaining - 1, max_wait_time) + else: + raise RuntimeError("Report not found for ({}, {}).".format(filename, pid)) + +def find_report_in_cur_dir(pid, filename): + for report_name in sorted(glob.glob("{}_*.crash".format(filename)), reverse=True): + # parse out pid from first line of report + # `Process: filename [pid]`` + with open(report_name) as cur_report: + pattern = re.compile(r'Process: *{} \[([0-9]*)\]'.format(filename)) + cur_report_pid = pattern.search(cur_report.readline()).group(1) + + assert cur_report_pid and cur_report_pid.isdigit() + if cur_report_pid == pid: + return report_name + + # did not find the crash report + return None + + +if __name__ == '__main__': + main()