diff --git a/compiler-rt/test/msan/CMakeLists.txt b/compiler-rt/test/msan/CMakeLists.txt --- a/compiler-rt/test/msan/CMakeLists.txt +++ b/compiler-rt/test/msan/CMakeLists.txt @@ -8,7 +8,7 @@ darwin_filter_host_archs(MSAN_SUPPORTED_ARCH MSAN_TEST_ARCH) endif() -macro(add_msan_testsuite arch lld thinlto) +macro(add_msan_testsuite arch lld thinlto eager) set(MSAN_TEST_TARGET_ARCH ${arch}) get_test_cc_for_arch(${arch} MSAN_TEST_TARGET_CC MSAN_TEST_TARGET_CFLAGS) @@ -24,8 +24,12 @@ list(APPEND MSAN_TEST_DEPS lld) endif() endif() + if (${eager}) + set(CONFIG_NAME "eager-${CONFIG_NAME}") + endif() set(MSAN_TEST_USE_THINLTO ${thinlto}) set(MSAN_TEST_USE_LLD ${lld}) + set(MSAN_TEST_EAGER_CHECK ${eager}) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in @@ -34,10 +38,11 @@ endmacro() foreach(arch ${MSAN_TEST_ARCH}) - add_msan_testsuite(${arch} False False) + add_msan_testsuite(${arch} False False False) + add_msan_testsuite(${arch} False False True) if(COMPILER_RT_HAS_LLD AND arch STREQUAL "x86_64" AND NOT (APPLE OR WIN32)) - add_msan_testsuite(${arch} True False) + add_msan_testsuite(${arch} True False False) endif() endforeach() diff --git a/compiler-rt/test/msan/bitfield.cpp b/compiler-rt/test/msan/bitfield.cpp new file mode 100644 --- /dev/null +++ b/compiler-rt/test/msan/bitfield.cpp @@ -0,0 +1,44 @@ +// RUN: %clangxx_msan -O0 %s -o %t && %run %t +// RUN: %clangxx_msan -O1 %s -o %t && %run %t +// RUN: %clangxx_msan -O3 %s -o %t && %run %t + +#include + +struct small { + char field : 5; +}; + +struct multiple_promote24 { + int field1 : 23; + int field2 : 23; + int field3 : 23; +}; + +small make_small() { + small s; + ((volatile small *)&s)->field = 0; + return s; +} + +multiple_promote24 make_promote24() { + multiple_promote24 m; + ((volatile multiple_promote24 *)&m)->field1 = 0; + ((volatile multiple_promote24 *)&m)->field2 = 0; + ((volatile multiple_promote24 *)&m)->field3 = 0; + return m; +} + +int main(void) { + volatile small s = make_small(); + 
volatile multiple_promote24 m = make_promote24(); + + int check; + check = s.field; + __msan_check_mem_is_initialized(&check, sizeof check); + check = m.field1; + __msan_check_mem_is_initialized(&check, sizeof check); + check = m.field2; + __msan_check_mem_is_initialized(&check, sizeof check); + check = m.field3; + __msan_check_mem_is_initialized(&check, sizeof check); +} \ No newline at end of file diff --git a/compiler-rt/test/msan/chained_origin.cpp b/compiler-rt/test/msan/chained_origin.cpp --- a/compiler-rt/test/msan/chained_origin.cpp +++ b/compiler-rt/test/msan/chained_origin.cpp @@ -16,34 +16,35 @@ // RUN: FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-HEAP < %t.out #include +#include "test.h" -volatile int x, y; +volatile PartInit x, y; __attribute__((noinline)) -void fn_g(int a) { - x = a; +void fn_g(PartInit a) { + x.val = a.val; } __attribute__((noinline)) -void fn_f(int a) { +void fn_f(PartInit a) { fn_g(a); } __attribute__((noinline)) void fn_h() { - y = x; + y.val = x.val; } int main(int argc, char *argv[]) { #ifdef HEAP - int * volatile zz = new int; - int z = *zz; + PartInit * volatile zz = new PartInit; + PartInit z = *zz; #else - int volatile z; + PartInit volatile z; #endif - fn_f(z); + fn_f({z.val}); fn_h(); - return y; + return y.val; } // CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value @@ -60,7 +61,7 @@ // CHECK-FULL-STACK: {{#2 .* in main.*chained_origin.cpp:}}[[@LINE-16]] // CHECK-SHORT-STACK: {{#0 .* in fn_g.*chained_origin.cpp:}}[[@LINE-37]] -// CHECK-STACK: Uninitialized value was created by an allocation of 'z' in the stack frame of function 'main' +// CHECK-STACK: Uninitialized value was created by an allocation of 'z.sroa.0' in the stack frame of function 'main' // CHECK-STACK: {{#0 .* in main.*chained_origin.cpp:}}[[@LINE-27]] // CHECK-HEAP: Uninitialized value was created by a heap allocation diff --git a/compiler-rt/test/msan/chained_origin_empty_stack.cpp b/compiler-rt/test/msan/chained_origin_empty_stack.cpp 
--- a/compiler-rt/test/msan/chained_origin_empty_stack.cpp +++ b/compiler-rt/test/msan/chained_origin_empty_stack.cpp @@ -14,21 +14,22 @@ // CHECK: #0 {{.*}} in main #include +#include "test.h" volatile int x; __attribute__((noinline)) -void fn_g(int a) { - x = a; +void fn_g(PartInit a) { + x = a.val; } __attribute__((noinline)) -void fn_f(int a) { +void fn_f(PartInit a) { fn_g(a); } int main(int argc, char *argv[]) { int volatile z; - fn_f(z); + fn_f({z}); return x; } diff --git a/compiler-rt/test/msan/chained_origin_memcpy.cpp b/compiler-rt/test/msan/chained_origin_memcpy.cpp --- a/compiler-rt/test/msan/chained_origin_memcpy.cpp +++ b/compiler-rt/test/msan/chained_origin_memcpy.cpp @@ -17,18 +17,19 @@ #include #include +#include "test.h" int xx[10000]; int yy[10000]; volatile int idx = 30; __attribute__((noinline)) -void fn_g(int a, int b) { - xx[idx] = a; xx[idx + 10] = b; +void fn_g(PartInit a, PartInit b) { + xx[idx] = a.val; xx[idx + 10] = b.val; } __attribute__((noinline)) -void fn_f(int a, int b) { +void fn_f(PartInit a, PartInit b) { fn_g(a, b); } @@ -40,7 +41,7 @@ int main(int argc, char *argv[]) { int volatile z1; int volatile z2; - fn_f(z1, z2); + fn_f({z1}, {z2}); fn_h(); return yy[idx + OFFSET]; } diff --git a/compiler-rt/test/msan/cxa_atexit.cpp b/compiler-rt/test/msan/cxa_atexit.cpp --- a/compiler-rt/test/msan/cxa_atexit.cpp +++ b/compiler-rt/test/msan/cxa_atexit.cpp @@ -4,6 +4,7 @@ #include #include +#include "test.h" class A { public: // This destructor get stale argument shadow left from the call to f(). @@ -16,12 +17,12 @@ A a; __attribute__((noinline)) -void f(long x) { +void f(PartInit x) { } int main(void) { long x; - long * volatile p = &x; + PartInit * volatile p = (PartInit *)&x; // This call poisons TLS shadow for the first function argument. 
f(*p); return 0; diff --git a/compiler-rt/test/msan/in-struct-padding.cpp b/compiler-rt/test/msan/in-struct-padding.cpp new file mode 100644 --- /dev/null +++ b/compiler-rt/test/msan/in-struct-padding.cpp @@ -0,0 +1,86 @@ +// RUN: %clangxx_msan -O0 %s -o %t && %run %t +// RUN: %clangxx_msan -O1 %s -o %t && %run %t +// RUN: %clangxx_msan -O3 %s -o %t && %run %t + +#include + +struct mini { + char a; + int b; +}; + +struct medium { + char a; + uint64_t b; +}; + +struct large { + char a; + uint64_t c[4]; +}; + +struct nested { + medium a; +}; + +struct superclass : medium { +}; + +template +void volatile_init(T &val) { + *(volatile T *)&val = 0; +} + +template +__attribute__((noinline)) +T passthru_byval(T t) { + return t; +} + +template +__attribute__((noinline)) +T &passthru_byref(T &t) { + return t; +} + +int main() { + mini m; + volatile_init(m.a); + volatile_init(m.b); + medium med; + volatile_init(med.a); + volatile_init(med.b); + large l; + volatile_init(l.a); + volatile_init(l.c[0]); + volatile_init(l.c[1]); + volatile_init(l.c[2]); + volatile_init(l.c[3]); + nested ns; + volatile_init(ns.a.a); + volatile_init(ns.a.b); + superclass sp; + volatile_init(sp.a); + volatile_init(sp.b); + + mini mval = passthru_byval(m); + mini &mref = passthru_byref(m); + + medium medval = passthru_byval(med); + medium &medref = passthru_byref(med); + + large lgval = passthru_byval(l); + large &lgref = passthru_byref(l); + + nested nsval = passthru_byval(ns); + nested &nsref = passthru_byref(ns); + + superclass spval = passthru_byval(sp); + superclass &spref = passthru_byref(sp); + + asm volatile ("" :: "r"(&mval), "r"(&mref)); + asm volatile ("" :: "r"(&medval), "r"(&medref)); + asm volatile ("" :: "r"(&lgval), "r"(&lgref)); + asm volatile ("" :: "r"(&nsval), "r"(&nsref)); + asm volatile ("" :: "r"(&spval), "r"(&spref)); +} \ No newline at end of file diff --git a/compiler-rt/test/msan/insertvalue_origin.cpp b/compiler-rt/test/msan/insertvalue_origin.cpp --- 
a/compiler-rt/test/msan/insertvalue_origin.cpp +++ b/compiler-rt/test/msan/insertvalue_origin.cpp @@ -7,22 +7,24 @@ #include #include +#include "test.h" struct mypair { int64_t x; int y; }; -mypair my_make_pair(int64_t x, int y) { +// Suppress eager checking +PartInit my_make_pair(PartInit x, int y) { mypair p; - p.x = x; + p.x = x.val; p.y = y; - return p; + return {p}; } int main() { - int64_t * volatile p = new int64_t; - mypair z = my_make_pair(*p, 0); + PartInit *volatile p = new PartInit; + mypair z = *my_make_pair(*p, 0); if (z.x) printf("zzz\n"); // CHECK: MemorySanitizer: use-of-uninitialized-value diff --git a/compiler-rt/test/msan/lit.site.cfg.py.in b/compiler-rt/test/msan/lit.site.cfg.py.in --- a/compiler-rt/test/msan/lit.site.cfg.py.in +++ b/compiler-rt/test/msan/lit.site.cfg.py.in @@ -7,6 +7,9 @@ config.use_lld = @MSAN_TEST_USE_LLD@ config.use_thinlto = @MSAN_TEST_USE_THINLTO@ +if @MSAN_TEST_EAGER_CHECK@: + config.target_cflags += " -mllvm -msan-eager-checks" + # Load common config for all compiler-rt lit tests. 
lit_config.load_config(config, "@COMPILER_RT_BINARY_DIR@/test/lit.common.configured") diff --git a/compiler-rt/test/msan/no_sanitize_memory_prop.cpp b/compiler-rt/test/msan/no_sanitize_memory_prop.cpp --- a/compiler-rt/test/msan/no_sanitize_memory_prop.cpp +++ b/compiler-rt/test/msan/no_sanitize_memory_prop.cpp @@ -7,18 +7,19 @@ #include #include +#include "test.h" __attribute__((noinline)) __attribute__((weak)) __attribute__((no_sanitize_memory)) -int f(int x) { +PartInit f(PartInit x) { return x; } int main(void) { - int x; - int * volatile p = &x; - int y = f(*p); + PartInit x; + PartInit * volatile p = &x; + int y = f(*p).val; if (y) exit(0); return 0; diff --git a/compiler-rt/test/msan/param_tls_limit.cpp b/compiler-rt/test/msan/param_tls_limit.cpp --- a/compiler-rt/test/msan/param_tls_limit.cpp +++ b/compiler-rt/test/msan/param_tls_limit.cpp @@ -17,6 +17,7 @@ #include #include +#include "test.h" // This test assumes that ParamTLS size is 800 bytes. @@ -36,7 +37,6 @@ #define PARTIAL_OVERFLOW(x) assert(__msan_test_shadow((char *)(&(x) + 1) - 1, 1) == -1) #endif - template struct S { char x[N]; @@ -58,7 +58,7 @@ PARTIAL_OVERFLOW(s); } -void f_many(int a, double b, S<800> s, int c, double d) { +void f_many(PartInit a, PartInit b, S<800> s, PartInit c, PartInit d) { NO_OVERFLOW(a); NO_OVERFLOW(b); PARTIAL_OVERFLOW(s); @@ -68,7 +68,7 @@ // -8 bytes for "int a", aligned by 8 // -2 to make "int c" a partial fit -void f_many2(int a, S<800 - 8 - 2> s, int c, double d) { +void f_many2(PartInit a, S<800 - 8 - 2> s, PartInit c, PartInit d) { NO_OVERFLOW(a); NO_OVERFLOW(s); PARTIAL_OVERFLOW(c); @@ -85,8 +85,8 @@ f801(s801); f1000(s1000); - int i; - double d; + PartInit i; + PartInit d; f_many(i, d, s800, i, d); S<800 - 8 - 2> s788; diff --git a/compiler-rt/test/msan/parameter-mixing.cpp b/compiler-rt/test/msan/parameter-mixing.cpp new file mode 100644 --- /dev/null +++ b/compiler-rt/test/msan/parameter-mixing.cpp @@ -0,0 +1,96 @@ +// Mixes setting parameters which are 
byval, partialinit, and neither.
+// RUN: %clangxx_msan -O0 %s -o %t && %run %t
+// RUN: %clangxx_msan -O1 %s -o %t && %run %t
+// RUN: %clangxx_msan -O3 %s -o %t && %run %t
+
+#include <sanitizer/msan_interface.h>
+#include <stdarg.h>
+
+union mini {
+  int a;
+  char b;
+};
+
+struct large {
+  char data[0x100];
+};
+
+__attribute__((noinline))
+void poison_caller_tls_inner(large l) {
+  asm volatile ("" :: "r"(&l) );
+}
+__attribute__((noinline))
+void poison_caller_tls() {
+  large l;
+  poison_caller_tls_inner(l);
+}
+
+template <typename T>
+void volatile_init(T &val) {
+  *(volatile T *)&val = 0;
+}
+
+template <typename T>
+__attribute__((noinline))
+T passthru(T &val) {
+  return val;
+}
+
+__attribute__((noinline))
+void callee(mini m1, large l1, int i1, mini m2, large l2, int i2) {
+  mini m1_ = passthru(m1);
+  mini m2_ = passthru(m2);
+  large l1_ = passthru(l1);
+  large l2_ = passthru(l2);
+  int i1_ = passthru(i1);
+  int i2_ = passthru(i2);
+
+  asm volatile ("" :: "r"(&m1_) );
+  asm volatile ("" :: "r"(&m2_) );
+  asm volatile ("" :: "r"(&l1_) );
+  asm volatile ("" :: "r"(&l2_) );
+  asm volatile ("" :: "r"(&i1_) );
+  asm volatile ("" :: "r"(&i2_) );
+}
+
+__attribute__((noinline))
+void callee_va(int _unused, ...)
{ + va_list vl; + va_start(vl, _unused); + + mini m; + large l; + int i; + + m = va_arg(vl, mini); + l = va_arg(vl, large); + i = va_arg(vl, int); + + __msan_check_mem_is_initialized(&m, sizeof m); + __msan_check_mem_is_initialized(&l, sizeof l); + __msan_check_mem_is_initialized(&i, sizeof i); + + m = va_arg(vl, mini); + l = va_arg(vl, large); + i = va_arg(vl, int); + + __msan_check_mem_is_initialized(&m, sizeof m); + __msan_check_mem_is_initialized(&l, sizeof l); + __msan_check_mem_is_initialized(&i, sizeof i); + + va_end(vl); + +} + +int main(void) { + large l = {}; + mini m; + volatile_init(m.b); + int i = 0; + poison_caller_tls(); + callee(m, l, i, m, l, i); + + volatile_init(m.a); + poison_caller_tls(); + callee_va(0, m, l, i, m, l, i); +} diff --git a/compiler-rt/test/msan/qsort.cpp b/compiler-rt/test/msan/qsort.cpp --- a/compiler-rt/test/msan/qsort.cpp +++ b/compiler-rt/test/msan/qsort.cpp @@ -9,18 +9,19 @@ #include #include +#include "test.h" constexpr size_t kSize1 = 27; constexpr size_t kSize2 = 7; bool seen2; -void dummy(long a, long b, long c, long d, long e) {} +void dummy(PartInit a, PartInit b, PartInit c, PartInit d, PartInit e) {} void poison_stack_and_param() { char x[10000]; int y; - dummy(y, y, y, y, y); + dummy({y}, {y}, {y}, {y}, {y}); } __attribute__((always_inline)) int cmp(long a, long b) { diff --git a/compiler-rt/test/msan/signal_stress_test.cpp b/compiler-rt/test/msan/signal_stress_test.cpp --- a/compiler-rt/test/msan/signal_stress_test.cpp +++ b/compiler-rt/test/msan/signal_stress_test.cpp @@ -27,10 +27,17 @@ int sigcnt; +// Used to suppress eager checking +union VoidP { + void *ptr; + char __partial_init; +}; + void SignalHandler(int signo) { assert(signo == SIGPROF); void *p; - void **volatile q = &p; + VoidP p_wrap = { p }; + VoidP *volatile q = &p_wrap; f(true, 10, *q, *q, *q, *q, *q, *q, *q, *q, *q, *q); @@ -48,7 +55,8 @@ setitimer(ITIMER_PROF, &itv, NULL); void *p; - void **volatile q = &p; + VoidP p_wrap = { p }; + VoidP 
*volatile q = &p_wrap; do { f(false, 20, diff --git a/compiler-rt/test/msan/stack-origin2.cpp b/compiler-rt/test/msan/stack-origin2.cpp --- a/compiler-rt/test/msan/stack-origin2.cpp +++ b/compiler-rt/test/msan/stack-origin2.cpp @@ -19,18 +19,19 @@ // RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out #include +#include "test.h" extern "C" -int f(int depth) { +PartInit f(int depth) { if (depth) return f(depth - 1); - int x; - int *volatile p = &x; + PartInit x; + PartInit *volatile p = &x; return *p; } int main(int argc, char **argv) { - return f(1); + return f(1).val; // CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value // CHECK: {{#0 0x.* in main .*stack-origin2.cpp:}}[[@LINE-2]] diff --git a/compiler-rt/test/msan/test.h b/compiler-rt/test/msan/test.h --- a/compiler-rt/test/msan/test.h +++ b/compiler-rt/test/msan/test.h @@ -1,3 +1,5 @@ +#pragma once + #if __LP64__ # define SANITIZER_WORDSIZE 64 #else @@ -5,7 +7,7 @@ #endif // This is a simplified version of GetMaxVirtualAddress function. 
-unsigned long SystemVMA () {
+inline unsigned long SystemVMA () {
 #if SANITIZER_WORDSIZE == 64
   unsigned long vma = (unsigned long)__builtin_frame_address(0);
   return SANITIZER_WORDSIZE - __builtin_clzll(vma);
@@ -13,3 +15,12 @@
   return SANITIZER_WORDSIZE;
 #endif
 }
+
+template <typename T>
+union PartInit {
+  T val;
+  union {} __empty[0];
+
+  __attribute__((always_inline))
+  T &operator *() { return val; }
+};
\ No newline at end of file
diff --git a/compiler-rt/test/msan/unaligned_read_origin.cpp b/compiler-rt/test/msan/unaligned_read_origin.cpp
--- a/compiler-rt/test/msan/unaligned_read_origin.cpp
+++ b/compiler-rt/test/msan/unaligned_read_origin.cpp
@@ -4,13 +4,18 @@
 // RUN: FileCheck %s < %t.out && FileCheck %s < %t.out
 
 #include <sanitizer/msan_interface.h>
+#include <assert.h>
 
 int main(int argc, char **argv) {
   int x;
-  int *volatile p = &x;
+  int *volatile x_p = &x;
+  int storage = 0;
+  int *volatile p = &storage;
+  __sanitizer_unaligned_store32(p, *x_p);
+  assert(__msan_test_shadow(p, sizeof *p) != -1);
   return __sanitizer_unaligned_load32(p);
   // CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value
   // CHECK: {{#0 0x.* in main .*unaligned_read_origin.cpp:}}[[@LINE-2]]
   // CHECK: Uninitialized value was created by an allocation of 'x' in the stack frame of function 'main'
-  // CHECK: {{#0 0x.* in main .*unaligned_read_origin.cpp:}}[[@LINE-7]]
+  // CHECK: {{#0 0x.* in main .*unaligned_read_origin.cpp:}}[[@LINE-11]]
 }
diff --git a/compiler-rt/test/msan/unpoison_param.cpp b/compiler-rt/test/msan/unpoison_param.cpp
--- a/compiler-rt/test/msan/unpoison_param.cpp
+++ b/compiler-rt/test/msan/unpoison_param.cpp
@@ -8,43 +8,44 @@
 #include <assert.h>
 #include <sanitizer/msan_interface.h>
+#include "test.h"
 
 #if __has_feature(memory_sanitizer)
 
-__attribute__((noinline)) int bar(int a, int b) {
+__attribute__((noinline)) int bar(PartInit<int> a, PartInit<int> b) {
   volatile int zero = 0;
   return zero;
 }
 
-int foo(int a, int b, int unpoisoned_params) {
+int foo(PartInit<int> a, PartInit<int> b, int unpoisoned_params) {
   if (unpoisoned_params == 0) {
-    assert(__msan_test_shadow(&a,
sizeof(a)) == 0); - assert(__msan_test_shadow(&b, sizeof(b)) == 0); + assert(__msan_test_shadow(&a.val, sizeof(a)) == 0); + assert(__msan_test_shadow(&b.val, sizeof(b)) == 0); } else if (unpoisoned_params == 1) { - assert(__msan_test_shadow(&a, sizeof(a)) == -1); - assert(__msan_test_shadow(&b, sizeof(b)) == 0); + assert(__msan_test_shadow(&a.val, sizeof(a)) == -1); + assert(__msan_test_shadow(&b.val, sizeof(b)) == 0); } else if (unpoisoned_params == 2) { - assert(__msan_test_shadow(&a, sizeof(a)) == -1); - assert(__msan_test_shadow(&b, sizeof(b)) == -1); + assert(__msan_test_shadow(&a.val, sizeof(a)) == -1); + assert(__msan_test_shadow(&b.val, sizeof(b)) == -1); } // Poisons parameter shadow in TLS so that the next call from uninstrumented // main has params 1 and 2 poisoned no matter what. int x, y; - return bar(x, y); + return bar({x}, {y}); } #else -int foo(int, int, int); +int foo(PartInit, PartInit, int); int main() { - foo(0, 0, 2); // Poison parameters for next call. - foo(0, 0, 0); // Check that both params are poisoned. + foo({0}, {0}, 2); // Poison parameters for next call. + foo({0}, {0}, 0); // Check that both params are poisoned. __msan_unpoison_param(1); - foo(0, 0, 1); // Check that only first param is unpoisoned. + foo({0}, {0}, 1); // Check that only first param is unpoisoned. __msan_unpoison_param(2); - foo(0, 0, 2); // Check that first and second params are unpoisoned. + foo({0}, {0}, 2); // Check that first and second params are unpoisoned. 
return 0; } diff --git a/compiler-rt/test/msan/vararg.cpp b/compiler-rt/test/msan/vararg.cpp --- a/compiler-rt/test/msan/vararg.cpp +++ b/compiler-rt/test/msan/vararg.cpp @@ -1,16 +1,24 @@ -// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_PASS -O3 %s -o %t && \ +// RUN: not %run %t va_arg_tls >%t.out 2>&1 +// RUN: not [ -s %t.out ] + +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_PASS -O3 %s -o %t && \ +// RUN: not %run %t overflow >%t.out 2>&1 +// RUN: not [ -s %t.out ] + +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t va_arg_tls >%t.out 2>&1 // RUN: FileCheck %s --check-prefix=CHECK < %t.out -// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t overflow >%t.out 2>&1 // RUN: FileCheck %s --check-prefix=CHECK < %t.out -// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t va_arg_tls >%t.out 2>&1 // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN < %t.out -// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t overflow >%t.out 2>&1 // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN < %t.out @@ -24,6 +32,21 @@ #include #include +#include +#include "test.h" + +__attribute__((noinline)) +void dummy(PartInit a, PartInit b, PartInit c, PartInit d, PartInit e) { + __asm__ __volatile__ (""::"r"(a), "r"(b), "r"(c), "r"(d), "r"(e)); +} + +__attribute__((noinline)) +void poison_stack_and_param() { + char x[10000]; + int y; + dummy({y}, {y}, {y}, {y}, {y}); +} + __attribute__((noinline)) int sum(int n, ...) 
{ va_list args; @@ -39,15 +62,28 @@ } int main(int argc, char *argv[]) { +#if defined(EXPECT_FAIL) volatile int uninit; + #define PRE_RUN for (int i = 0; i < 6; i++) __msan_unpoison_param(i) + // CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value + // CHECK-ORIGIN: Uninitialized value was created by an allocation of 'uninit' in the stack frame of function 'main' +#elif defined(EXPECT_PASS) + volatile int uninit = 0; + #define PRE_RUN poison_stack_and_param() +#else + #error Bad configuration +#endif + volatile int a = 1, b = 2; if (argc == 2) { // Shadow/origin will be passed via va_arg_tls/va_arg_origin_tls. if (strcmp(argv[1], "va_arg_tls") == 0) { + PRE_RUN; return sum(3, uninit, a, b); } // Shadow/origin of |uninit| will be passed via overflow area. if (strcmp(argv[1], "overflow") == 0) { + PRE_RUN; return sum(7, a, a, a, a, a, a, uninit ); @@ -55,6 +91,3 @@ } return 0; } - -// CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value -// CHECK-ORIGIN: Uninitialized value was created by an allocation of 'uninit' in the stack frame of function 'main' diff --git a/compiler-rt/test/msan/vector_cvt.cpp b/compiler-rt/test/msan/vector_cvt.cpp --- a/compiler-rt/test/msan/vector_cvt.cpp +++ b/compiler-rt/test/msan/vector_cvt.cpp @@ -3,9 +3,10 @@ // REQUIRES: x86_64-target-arch #include +#include "test.h" -int to_int(double v) { - __m128d t = _mm_set_sd(v); +int to_int(PartInit v) { + __m128d t = _mm_set_sd(*v); int x = _mm_cvtsd_si32(t); return x; // CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value @@ -18,7 +19,7 @@ #else double v = 1.1; #endif - double* volatile p = &v; + PartInit* volatile p = (PartInit *)&v; int x = to_int(*p); return !x; } diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -281,6 +281,10 @@ cl::desc("report accesses through a 
pointer which has poisoned shadow"), cl::Hidden, cl::init(true)); +static cl::opt ClEagerChecks("msan-eager-checks", + cl::desc("check arguments at function call boundaries"), + cl::Hidden, cl::init(false)); + static cl::opt ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false)); @@ -869,6 +873,28 @@ M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(), PointerType::get(IRB.getInt8Ty(), 0), IntptrTy); + // Whitelist msan unaligned load functions which are explicitly designed + // to allow loading uninitialized values. + static const llvm::StringRef ExpectUninitRetval[] = { + "__sanitizer_unaligned_load16", "__sanitizer_unaligned_load32", "__sanitizer_unaligned_load64", + }; + for (auto &name : ExpectUninitRetval) { + if (Function *func = M.getFunction(name)) + func->addAttribute(AttributeList::ReturnIndex, Attribute::PartialInit); + } + + // Whitelist msan unaligned store functions which are explicitly designed + // to allow storing uninitialized values + static const llvm::StringRef ExpectUninitOperands[] = { + "__sanitizer_unaligned_store16", "__sanitizer_unaligned_store32", "__sanitizer_unaligned_store64", + }; + for (auto &name : ExpectUninitOperands) { + if (Function *func = M.getFunction(name)) { + for (unsigned op = 0; op < 2; op++) + func->addAttribute(AttributeList::FirstArgIndex+op, Attribute::PartialInit); + } + } + if (CompileKernel) { createKernelApi(M); } else { @@ -992,6 +1018,11 @@ } bool MemorySanitizerLegacyPass::doInitialization(Module &M) { + if (ClEagerChecks && !M.getModuleFlag("DisallowPoisonedCallArguments")) { + report_fatal_error("cannot use MSan eager checks; compiler does not support argument init analysis"); + return false; + } + if (!Options.Kernel) insertModuleCtor(M); MSan.emplace(M, Options); @@ -1088,7 +1119,8 @@ PoisonUndef = SanitizeFunction && ClPoisonUndef; // FIXME: Consider using SpecialCaseList to specify a list 
of functions that // must always return fully initialized values. For now, we hardcode "main". - CheckReturnValue = SanitizeFunction && (F.getName() == "main"); + CheckReturnValue = F.getName() == "main"; + CheckReturnValue &= SanitizeFunction; MS.initializeCallbacks(*F.getParent()); if (MS.CompileKernel) @@ -1232,7 +1264,24 @@ void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin, bool AsCall) { + + if (StructType *strct = dyn_cast(Shadow->getType())) { + unsigned i = 0; + for (auto it = strct->element_begin(); it != strct->element_end(); i++, it++) { + Value *ShadowItem; + { + IRBuilder<> IRB(OrigIns); + ShadowItem = IRB.CreateExtractValue(Shadow, i); + assert(ShadowItem && "Failed to create ExtractValue instruction!"); + } + + materializeOneCheck(OrigIns, ShadowItem, Origin, AsCall); + } + return; + } + IRBuilder<> IRB(OrigIns); + LLVM_DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n"); Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB); LLVM_DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n"); @@ -1635,14 +1684,23 @@ LLVM_DEBUG(dbgs() << "Arg is not sized\n"); continue; } + + bool FArgByVal = FArg.hasByValAttr(); + bool FArgPartialInit = FArg.hasAttribute(Attribute::PartialInit); + bool FArgCheck = ClEagerChecks && !FArgByVal && !FArgPartialInit; unsigned Size = FArg.hasByValAttr() ? DL.getTypeAllocSize(FArg.getParamByValType()) : DL.getTypeAllocSize(FArg.getType()); + if (A == &FArg) { bool Overflow = ArgOffset + Size > kParamTLSSize; - Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); - if (FArg.hasByValAttr()) { + if (FArgCheck) { + *ShadowPtr = getCleanShadow(V); + setOrigin(A, getCleanOrigin()); + continue; + } else if (FArgByVal) { + Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); // ByVal pointer itself has clean shadow. We copy the actual // argument shadow to the underlying memory. // Figure out maximal valid memcpy alignment. 
@@ -1667,6 +1725,8 @@ } *ShadowPtr = getCleanShadow(V); } else { + // Shadow over TLS + Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); if (Overflow) { // ParamTLS overflow. *ShadowPtr = getCleanShadow(V); @@ -1685,7 +1745,9 @@ setOrigin(A, getCleanOrigin()); } } - ArgOffset += alignTo(Size, kShadowTLSAlignment); + + if (!FArgCheck) + ArgOffset += alignTo(Size, kShadowTLSAlignment); } assert(*ShadowPtr && "Could not find shadow for an argument"); return *ShadowPtr; @@ -1729,8 +1791,8 @@ if (!InsertChecks) return; #ifndef NDEBUG Type *ShadowTy = Shadow->getType(); - assert((isa(ShadowTy) || isa(ShadowTy)) && - "Can only insert checks for integer and vector shadow types"); + assert((isa(ShadowTy) || isa(ShadowTy) || isa(ShadowTy)) && + "Can only insert checks for integer, vector, and struct shadow types"); #endif InstrumentationList.push_back( ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns)); @@ -3374,7 +3436,16 @@ << " Shadow: " << *ArgShadow << "\n"); bool ArgIsInitialized = false; const DataLayout &DL = F.getParent()->getDataLayout(); - if (CB.paramHasAttr(i, Attribute::ByVal)) { + + bool ByVal = CB.paramHasAttr(i, Attribute::ByVal); + bool PartialInit = CB.paramHasAttr(i, Attribute::PartialInit); + bool Check = ClEagerChecks && !ByVal && !PartialInit; + + if (Check) { + insertShadowCheck(A, &CB); + continue; + } else if (ByVal) { + // ByVal requires some special handling as it's too big for a single load assert(A->getType()->isPointerTy() && "ByVal argument is not a pointer!"); Size = DL.getTypeAllocSize(CB.getParamByValType(i)); @@ -3392,10 +3463,12 @@ Alignment, Size); // TODO(glider): need to copy origins. 
} else { + // Any other parameters mean we need bit-grained tracking of uninit data Size = DL.getTypeAllocSize(A->getType()); if (ArgOffset + Size > kParamTLSSize) break; Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, kShadowTLSAlignment); + Constant *Cst = dyn_cast(ArgShadow); if (Cst && Cst->isNullValue()) ArgIsInitialized = true; } @@ -3420,6 +3493,13 @@ // Don't emit the epilogue for musttail call returns. if (isa(CB) && cast(CB).isMustTailCall()) return; + + if (ClEagerChecks && !CB.hasRetAttr(Attribute::PartialInit)) { + setShadow(&CB, getCleanShadow(&CB)); + setOrigin(&CB, getCleanOrigin()); + return; + } + IRBuilder<> IRBBefore(&CB); // Until we have full dynamic coverage, make sure the retval shadow is 0. Value *Base = getShadowPtrForRetval(&CB, IRBBefore); @@ -3472,14 +3552,21 @@ // Don't emit the epilogue for musttail call returns. if (isAMustTailRetVal(RetVal)) return; Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB); - if (CheckReturnValue) { + bool UseTLS = !ClEagerChecks || + F.hasAttribute(AttributeList::ReturnIndex, Attribute::PartialInit); + + Value *Shadow = getShadow(RetVal); + bool StoreOrigin = true; + if (CheckReturnValue || !UseTLS) { insertShadowCheck(RetVal, &I); - Value *Shadow = getCleanShadow(RetVal); - IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); - } else { - Value *Shadow = getShadow(RetVal); + Shadow = getCleanShadow(RetVal); + StoreOrigin = false; + } + + // The caller still expects information passed over TLS if we pass our check + if (UseTLS) { IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); - if (MS.TrackOrigins) + if (MS.TrackOrigins && StoreOrigin) IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB)); } } diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll @@ -0,0 +1,114 @@ +; RUN: opt < %s 
-msan-check-access-address=0 -msan-track-origins=1 -msan-eager-checks -S -passes='module(msan-module),function(msan)' 2>&1 | \ +; RUN: FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK,CHECK-ORIGINS %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null } + +; Check the presence and the linkage type of __msan_track_origins and +; other interface symbols. +; CHECK-NOT: @__msan_track_origins +; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1 +; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0 +; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32 +; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64 +; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32 + + +define i32 @NormalRet() nounwind uwtable sanitize_memory { + ret i32 123 +} + +; CHECK-LABEL: @NormalRet +; CHECK: ret i32 + +define partialinit i32 @PartialRet() nounwind uwtable sanitize_memory { + ret i32 123 +} + +; CHECK-LABEL: @PartialRet +; CHECK: store i32 0,{{.*}}__msan_retval_tls +; CHECK-NOT: call void @__msan_warning_noreturn() +; CHECK: ret i32 + +define i32 @LoadedRet() nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + %o = load i32, i32 *%p + ret i32 %o +} + +; CHECK-LABEL: @LoadedRet +; CHECK-NOT: __msan_retval_tls +; CHECK: call void @__msan_warning_noreturn() +; CHECK: unreachable +; 
CHECK-NOT: __msan_retval_tls +; CHECK: ret i32 %o + + +define void @NormalArg(i32 %a) nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + store i32 %a, i32 *%p + ret void +} + +; CHECK-LABEL: @NormalArg +; CHECK-NOT: __msan_retval_tls +; CHECK: %p = inttoptr +; CHECK: ret void + +define void @PartialArg(i32 partialinit %a) nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + store i32 %a, i32 *%p + ret void +} + +; CHECK-LABEL: @PartialArg +; CHECK: load {{.*}}__msan_param_tls +; CHECK: %p = inttoptr +; CHECK: ret void + +define void @CallNormal() nounwind uwtable sanitize_memory { + %r = call i32 @NormalRet() nounwind uwtable sanitize_memory + call void @NormalArg(i32 %r) nounwind uwtable sanitize_memory + ret void +} + +; CHECK-LABEL: @CallNormal +; CHECK: call i32 @NormalRet() +; CHECK-NOT: __msan_{{\w+}}_tls +; CHECK: call void @NormalArg +; CHECK: ret void + +define void @CallWithLoaded() nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + %o = load i32, i32 *%p + call void @NormalArg(i32 %o) nounwind uwtable sanitize_memory + ret void +} + +; CHECK-LABEL: @CallWithLoaded +; CHECK: %p = inttoptr +; CHECK-NOT: __msan_{{\w+}}_tls +; CHECK: call void @__msan_warning_noreturn() +; CHECK: unreachable +; CHECK-NOT: __msan_{{\w+}}_tls +; CHECK: call void @NormalArg +; CHECK: ret void + +define void @CallPartial() nounwind uwtable sanitize_memory { + %r = call i32 @PartialRet() nounwind uwtable sanitize_memory + call void @PartialArg(i32 %r) nounwind uwtable sanitize_memory + ret void +} + +; CHECK-LABEL: @CallPartial +; CHECK: call i32 @PartialRet() +; CHECK: load {{.*}}__msan_retval_tls +; CHECK: store {{.*}}__msan_param_tls +; CHECK: call void @PartialArg +; CHECK: ret void