diff --git a/compiler-rt/test/msan/CMakeLists.txt b/compiler-rt/test/msan/CMakeLists.txt --- a/compiler-rt/test/msan/CMakeLists.txt +++ b/compiler-rt/test/msan/CMakeLists.txt @@ -8,7 +8,7 @@ darwin_filter_host_archs(MSAN_SUPPORTED_ARCH MSAN_TEST_ARCH) endif() -macro(add_msan_testsuite arch lld thinlto) +macro(add_msan_testsuite arch lld thinlto eager) set(MSAN_TEST_TARGET_ARCH ${arch}) get_test_cc_for_arch(${arch} MSAN_TEST_TARGET_CC MSAN_TEST_TARGET_CFLAGS) @@ -24,8 +24,12 @@ list(APPEND MSAN_TEST_DEPS lld) endif() endif() + if (${eager}) + set(CONFIG_NAME "eager-${CONFIG_NAME}") + endif() set(MSAN_TEST_USE_THINLTO ${thinlto}) set(MSAN_TEST_USE_LLD ${lld}) + set(MSAN_TEST_EAGER_CHECK ${eager}) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in @@ -34,10 +38,11 @@ endmacro() foreach(arch ${MSAN_TEST_ARCH}) - add_msan_testsuite(${arch} False False) + add_msan_testsuite(${arch} False False False) + add_msan_testsuite(${arch} False False True) if(COMPILER_RT_HAS_LLD AND arch STREQUAL "x86_64" AND NOT (APPLE OR WIN32)) - add_msan_testsuite(${arch} True False) + add_msan_testsuite(${arch} True False False) endif() endforeach() diff --git a/compiler-rt/test/msan/bitfield.cpp b/compiler-rt/test/msan/bitfield.cpp new file mode 100644 --- /dev/null +++ b/compiler-rt/test/msan/bitfield.cpp @@ -0,0 +1,46 @@ +// Test that MSAN does not trigger on oddly-sized bitfields + +// RUN: %clangxx_msan -O0 %s -o %t && %run %t +// RUN: %clangxx_msan -O1 %s -o %t && %run %t +// RUN: %clangxx_msan -O3 %s -o %t && %run %t + +#include <sanitizer/msan_interface.h> + +struct small { + char field : 5; +}; + +struct multiple_promote24 { + int field1 : 23; + int field2 : 23; + int field3 : 23; +}; + +small make_small() { + small s; + ((volatile small *)&s)->field = 0; + return s; +} + +multiple_promote24 make_promote24() { + multiple_promote24 m; + ((volatile multiple_promote24 *)&m)->field1 = 0; + ((volatile multiple_promote24 *)&m)->field2 = 0; + ((volatile multiple_promote24 *)&m)->field3 = 0; + return
m; +} + +int main(void) { + volatile small s = make_small(); + volatile multiple_promote24 m = make_promote24(); + + int check; + check = s.field; + __msan_check_mem_is_initialized(&check, sizeof check); + check = m.field1; + __msan_check_mem_is_initialized(&check, sizeof check); + check = m.field2; + __msan_check_mem_is_initialized(&check, sizeof check); + check = m.field3; + __msan_check_mem_is_initialized(&check, sizeof check); +} diff --git a/compiler-rt/test/msan/chained_origin.cpp b/compiler-rt/test/msan/chained_origin.cpp --- a/compiler-rt/test/msan/chained_origin.cpp +++ b/compiler-rt/test/msan/chained_origin.cpp @@ -20,12 +20,12 @@ volatile int x, y; __attribute__((noinline)) -void fn_g(int a) { +void fn_g(volatile int &a) { x = a; } __attribute__((noinline)) -void fn_f(int a) { +void fn_f(volatile int &a) { fn_g(a); } diff --git a/compiler-rt/test/msan/chained_origin_empty_stack.cpp b/compiler-rt/test/msan/chained_origin_empty_stack.cpp --- a/compiler-rt/test/msan/chained_origin_empty_stack.cpp +++ b/compiler-rt/test/msan/chained_origin_empty_stack.cpp @@ -18,12 +18,12 @@ volatile int x; __attribute__((noinline)) -void fn_g(int a) { +void fn_g(volatile int &a) { x = a; } __attribute__((noinline)) -void fn_f(int a) { +void fn_f(volatile int &a) { fn_g(a); } diff --git a/compiler-rt/test/msan/chained_origin_memcpy.cpp b/compiler-rt/test/msan/chained_origin_memcpy.cpp --- a/compiler-rt/test/msan/chained_origin_memcpy.cpp +++ b/compiler-rt/test/msan/chained_origin_memcpy.cpp @@ -23,12 +23,12 @@ volatile int idx = 30; __attribute__((noinline)) -void fn_g(int a, int b) { +void fn_g(volatile int &a, volatile int &b) { xx[idx] = a; xx[idx + 10] = b; } __attribute__((noinline)) -void fn_f(int a, int b) { +void fn_f(volatile int &a, volatile int &b) { fn_g(a, b); } diff --git a/compiler-rt/test/msan/cxa_atexit.cpp b/compiler-rt/test/msan/cxa_atexit.cpp --- a/compiler-rt/test/msan/cxa_atexit.cpp +++ b/compiler-rt/test/msan/cxa_atexit.cpp @@ -1,4 +1,5 @@ // RUN: 
%clangxx_msan -O0 %s -o %t && %run %t %p +// REQUIRES: !msan_eager_checks // PR17377: C++ module destructors get stale argument shadow. diff --git a/compiler-rt/test/msan/in-struct-padding.cpp b/compiler-rt/test/msan/in-struct-padding.cpp new file mode 100644 --- /dev/null +++ b/compiler-rt/test/msan/in-struct-padding.cpp @@ -0,0 +1,88 @@ +// Test that different forms of struct padding aren't checked by msan + +// RUN: %clangxx_msan -O0 %s -o %t && %run %t +// RUN: %clangxx_msan -O1 %s -o %t && %run %t +// RUN: %clangxx_msan -O3 %s -o %t && %run %t + +#include <stdint.h> + +struct mini { + char a; + int b; +}; + +struct medium { + char a; + uint64_t b; +}; + +struct large { + char a; + uint64_t c[4]; +}; + +struct nested { + medium a; +}; + +struct superclass : medium { +}; + +template <typename T> +void volatile_init(T &val) { + *(volatile T *)&val = 0; +} + +template <typename T> +__attribute__((noinline)) +T passthru_byval(T t) { + return t; +} + +template <typename T> +__attribute__((noinline)) +T &passthru_byref(T &t) { + return t; +} + +int main() { + mini m; + volatile_init(m.a); + volatile_init(m.b); + medium med; + volatile_init(med.a); + volatile_init(med.b); + large l; + volatile_init(l.a); + volatile_init(l.c[0]); + volatile_init(l.c[1]); + volatile_init(l.c[2]); + volatile_init(l.c[3]); + nested ns; + volatile_init(ns.a.a); + volatile_init(ns.a.b); + superclass sp; + volatile_init(sp.a); + volatile_init(sp.b); + + mini mval = passthru_byval(m); + mini &mref = passthru_byref(m); + + medium medval = passthru_byval(med); + medium &medref = passthru_byref(med); + + large lgval = passthru_byval(l); + large &lgref = passthru_byref(l); + + nested nsval = passthru_byval(ns); + nested &nsref = passthru_byref(ns); + + superclass spval = passthru_byval(sp); + superclass &spref = passthru_byref(sp); + + asm volatile ("" :: "r"(&mval), "r"(&mref)); + asm volatile ("" :: "r"(&medval), "r"(&medref)); + asm volatile ("" :: "r"(&lgval), "r"(&lgref)); + asm volatile ("" :: "r"(&nsval), "r"(&nsref)); + asm volatile ("" 
:: "r"(&spval), "r"(&spref)); +} diff --git a/compiler-rt/test/msan/insertvalue_origin.cpp b/compiler-rt/test/msan/insertvalue_origin.cpp --- a/compiler-rt/test/msan/insertvalue_origin.cpp +++ b/compiler-rt/test/msan/insertvalue_origin.cpp @@ -13,7 +13,7 @@ int y; }; -mypair my_make_pair(int64_t x, int y) { +mypair my_make_pair(const int64_t &x, const int &y) { mypair p; p.x = x; p.y = y; diff --git a/compiler-rt/test/msan/lit.site.cfg.py.in b/compiler-rt/test/msan/lit.site.cfg.py.in --- a/compiler-rt/test/msan/lit.site.cfg.py.in +++ b/compiler-rt/test/msan/lit.site.cfg.py.in @@ -7,6 +7,10 @@ config.use_lld = @MSAN_TEST_USE_LLD@ config.use_thinlto = @MSAN_TEST_USE_THINLTO@ +if @MSAN_TEST_EAGER_CHECK@: + config.target_cflags += " -mllvm -msan-eager-checks -DTEST_MSAN_EAGER_CHECKS" + config.available_features.add('msan_eager_checks') + # Load common config for all compiler-rt lit tests. lit_config.load_config(config, "@COMPILER_RT_BINARY_DIR@/test/lit.common.configured") diff --git a/compiler-rt/test/msan/no_sanitize_memory_prop.cpp b/compiler-rt/test/msan/no_sanitize_memory_prop.cpp --- a/compiler-rt/test/msan/no_sanitize_memory_prop.cpp +++ b/compiler-rt/test/msan/no_sanitize_memory_prop.cpp @@ -11,7 +11,7 @@ __attribute__((noinline)) __attribute__((weak)) __attribute__((no_sanitize_memory)) -int f(int x) { +int f(int &x) { return x; } diff --git a/compiler-rt/test/msan/param_tls_limit.cpp b/compiler-rt/test/msan/param_tls_limit.cpp --- a/compiler-rt/test/msan/param_tls_limit.cpp +++ b/compiler-rt/test/msan/param_tls_limit.cpp @@ -4,6 +4,7 @@ // RUN: %clangxx_msan -O0 %s -o %t && %run %t // RUN: %clangxx_msan -fsanitize-memory-track-origins -O0 %s -o %t && %run %t // RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -O0 %s -o %t && %run %t +// REQUIRES: !msan_eager_checks // // AArch64 fails with: // void f801(S<801>): Assertion `__msan_test_shadow(&s, sizeof(s)) == -1' failed diff --git a/compiler-rt/test/msan/parameter-mixing.cpp 
b/compiler-rt/test/msan/parameter-mixing.cpp new file mode 100644 --- /dev/null +++ b/compiler-rt/test/msan/parameter-mixing.cpp @@ -0,0 +1,96 @@ +// Mixes differently-sized parameters in function calls +// RUN: %clangxx_msan -O0 %s -o %t && %run %t +// RUN: %clangxx_msan -O1 %s -o %t && %run %t +// RUN: %clangxx_msan -O3 %s -o %t && %run %t + +#include <sanitizer/msan_interface.h> +#include <stdarg.h> + +union mini { + int a; + char b; +}; + +struct large { + char data[0x100]; +}; + +__attribute__((noinline)) +void poison_caller_tls_inner(large l) { + asm volatile ("" :: "r"(&l) ); +} +__attribute__((noinline)) +void poison_caller_tls() { + large l; + poison_caller_tls_inner(l); +} + +template <typename T> +void volatile_init(T &val) { + *(volatile T *)&val = 0; +} + +template <typename T> +__attribute__((noinline)) +T passthru(T &val) { + return val; +} + +__attribute__((noinline)) +void callee(mini m1, large l1, int i1, mini m2, large l2, int i2) { + mini m1_ = passthru(m1); + mini m2_ = passthru(m2); + large l1_ = passthru(l1); + large l2_ = passthru(l2); + int i1_ = passthru(i1); + int i2_ = passthru(i2); + + asm volatile ("" :: "r"(&m1_) ); + asm volatile ("" :: "r"(&m2_) ); + asm volatile ("" :: "r"(&l1_) ); + asm volatile ("" :: "r"(&l2_) ); + asm volatile ("" :: "r"(&i1_) ); + asm volatile ("" :: "r"(&i2_) ); +} + +__attribute__((noinline)) +void callee_va(int _unused, ...) 
{ + va_list vl; + va_start(vl, _unused); + + mini m; + large l; + int i; + + m = va_arg(vl, mini); + l = va_arg(vl, large); + i = va_arg(vl, int); + + __msan_check_mem_is_initialized(&m, sizeof m); + __msan_check_mem_is_initialized(&l, sizeof l); + __msan_check_mem_is_initialized(&i, sizeof i); + + m = va_arg(vl, mini); + l = va_arg(vl, large); + i = va_arg(vl, int); + + __msan_check_mem_is_initialized(&m, sizeof m); + __msan_check_mem_is_initialized(&l, sizeof l); + __msan_check_mem_is_initialized(&i, sizeof i); + + va_end(vl); + +} + +int main(void) { + large l = {}; + mini m; + volatile_init(m.b); + int i = 0; + poison_caller_tls(); + callee(m, l, i, m, l, i); + + volatile_init(m.a); + poison_caller_tls(); + callee_va(0, m, l, i, m, l, i); +} diff --git a/compiler-rt/test/msan/qsort.cpp b/compiler-rt/test/msan/qsort.cpp --- a/compiler-rt/test/msan/qsort.cpp +++ b/compiler-rt/test/msan/qsort.cpp @@ -19,8 +19,10 @@ void poison_stack_and_param() { char x[10000]; +#ifndef TEST_MSAN_EAGER_CHECKS int y; dummy(y, y, y, y, y); +#endif } __attribute__((always_inline)) int cmp(long a, long b) { diff --git a/compiler-rt/test/msan/signal_stress_test.cpp b/compiler-rt/test/msan/signal_stress_test.cpp --- a/compiler-rt/test/msan/signal_stress_test.cpp +++ b/compiler-rt/test/msan/signal_stress_test.cpp @@ -4,6 +4,7 @@ // Reported deadly signal due to stack-overflow // XFAIL: netbsd +// XFAIL: msan_eager_checks #include #include diff --git a/compiler-rt/test/msan/stack-origin2.cpp b/compiler-rt/test/msan/stack-origin2.cpp --- a/compiler-rt/test/msan/stack-origin2.cpp +++ b/compiler-rt/test/msan/stack-origin2.cpp @@ -18,6 +18,8 @@ // RUN: %clangxx_msan -fsanitize-memory-track-origins -O3 %s -o %t && not %run %t >%t.out 2>&1 // RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out +// REQUIRES: !msan_eager_checks + #include extern "C" diff --git a/compiler-rt/test/msan/unpoison_param.cpp b/compiler-rt/test/msan/unpoison_param.cpp --- 
a/compiler-rt/test/msan/unpoison_param.cpp +++ b/compiler-rt/test/msan/unpoison_param.cpp @@ -5,6 +5,7 @@ // RUN: %clangxx_msan -fno-sanitize=memory -c %s -o %t-main.o // RUN: %clangxx_msan %t-main.o %s -o %t // RUN: %run %t +// REQUIRES: !msan_eager_checks #include #include diff --git a/compiler-rt/test/msan/vararg.cpp b/compiler-rt/test/msan/vararg.cpp --- a/compiler-rt/test/msan/vararg.cpp +++ b/compiler-rt/test/msan/vararg.cpp @@ -1,16 +1,24 @@ -// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_PASS -O3 %s -o %t && \ +// RUN: not %run %t va_arg_tls >%t.out 2>&1 +// RUN: not [ -s %t.out ] + +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_PASS -O3 %s -o %t && \ +// RUN: not %run %t overflow >%t.out 2>&1 +// RUN: not [ -s %t.out ] + +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t va_arg_tls >%t.out 2>&1 // RUN: FileCheck %s --check-prefix=CHECK < %t.out -// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t overflow >%t.out 2>&1 // RUN: FileCheck %s --check-prefix=CHECK < %t.out -// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t va_arg_tls >%t.out 2>&1 // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN < %t.out -// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -O3 %s -o %t && \ +// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -DEXPECT_FAIL -O3 %s -o %t && \ // RUN: not %run %t overflow >%t.out 2>&1 // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN < %t.out @@ -24,6 +32,22 @@ #include #include +#include + +__attribute__((noinline)) +void dummy(long a, long b, long c, long d, long e) { + __asm__ __volatile__ 
(""::"r"(a), "r"(b), "r"(c), "r"(d), "r"(e)); +} + +__attribute__((noinline)) +void poison_stack_and_param() { + char x[10000]; +#ifndef TEST_MSAN_EAGER_CHECKS + int y; + dummy(y, y, y, y, y); +#endif +} + __attribute__((noinline)) int sum(int n, ...) { va_list args; @@ -39,15 +63,28 @@ } int main(int argc, char *argv[]) { +#if defined(EXPECT_FAIL) volatile int uninit; + #define PRE_RUN for (int i = 0; i < 6; i++) __msan_unpoison_param(i) + // CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value + // CHECK-ORIGIN: Uninitialized value was created by an allocation of 'uninit' in the stack frame of function 'main' +#elif defined(EXPECT_PASS) + volatile int uninit = 0; + #define PRE_RUN poison_stack_and_param() +#else + #error Bad configuration +#endif + volatile int a = 1, b = 2; if (argc == 2) { // Shadow/origin will be passed via va_arg_tls/va_arg_origin_tls. if (strcmp(argv[1], "va_arg_tls") == 0) { + PRE_RUN; return sum(3, uninit, a, b); } // Shadow/origin of |uninit| will be passed via overflow area. 
if (strcmp(argv[1], "overflow") == 0) { + PRE_RUN; return sum(7, a, a, a, a, a, a, uninit ); @@ -55,6 +92,3 @@ } return 0; } - -// CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value -// CHECK-ORIGIN: Uninitialized value was created by an allocation of 'uninit' in the stack frame of function 'main' diff --git a/compiler-rt/test/msan/vector_cvt.cpp b/compiler-rt/test/msan/vector_cvt.cpp --- a/compiler-rt/test/msan/vector_cvt.cpp +++ b/compiler-rt/test/msan/vector_cvt.cpp @@ -4,7 +4,7 @@ #include -int to_int(double v) { +int to_int(double &v) { __m128d t = _mm_set_sd(v); int x = _mm_cvtsd_si32(t); return x; diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -281,6 +281,10 @@ cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true)); +static cl::opt ClEagerChecks("msan-eager-checks", + cl::desc("check arguments and return values at function call boundaries"), + cl::Hidden, cl::init(false)); + static cl::opt ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false)); @@ -869,6 +873,36 @@ M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(), PointerType::get(IRB.getInt8Ty(), 0), IntptrTy); + // Don't check msan unaligned load functions which are explicitly designed + // to allow loading uninitialized values. 
+ static const llvm::StringRef ExpectUninitRetval[] = { + "__sanitizer_unaligned_load16", "__sanitizer_unaligned_load32", "__sanitizer_unaligned_load64", + }; + for (auto &name : ExpectUninitRetval) { + if (Function *func = M.getFunction(name)) { + func->removeAttribute(AttributeList::ReturnIndex, Attribute::NoUndef); + for (auto *U : func->users()) { + CallBase *CB = cast<CallBase>(U); + CB->removeAttribute(AttributeList::ReturnIndex, Attribute::NoUndef); + } + } + } + + // Don't check msan unaligned store functions which are explicitly designed + // to allow storing uninitialized values + static const llvm::StringRef ExpectUninitOperands[] = { + "__sanitizer_unaligned_store16", "__sanitizer_unaligned_store32", "__sanitizer_unaligned_store64", + }; + for (auto &name : ExpectUninitOperands) { + if (Function *func = M.getFunction(name)) { + func->removeAttribute(AttributeList::FirstArgIndex+1, Attribute::NoUndef); + for (auto *U : func->users()) { + CallBase *CB = cast<CallBase>(U); + CB->removeAttribute(AttributeList::FirstArgIndex+1, Attribute::NoUndef); + } + } + } + if (CompileKernel) { createKernelApi(M); } else { @@ -1653,14 +1687,23 @@ LLVM_DEBUG(dbgs() << "Arg is not sized\n"); continue; } + + bool FArgByVal = FArg.hasByValAttr(); + bool FArgNoUndef = FArg.hasAttribute(Attribute::NoUndef); + bool FArgCheck = ClEagerChecks && !FArgByVal && FArgNoUndef; unsigned Size = FArg.hasByValAttr() ? DL.getTypeAllocSize(FArg.getParamByValType()) : DL.getTypeAllocSize(FArg.getType()); + if (A == &FArg) { bool Overflow = ArgOffset + Size > kParamTLSSize; - Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); - if (FArg.hasByValAttr()) { + if (FArgCheck) { + *ShadowPtr = getCleanShadow(V); + setOrigin(A, getCleanOrigin()); + continue; + } else if (FArgByVal) { + Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); // ByVal pointer itself has clean shadow. We copy the actual // argument shadow to the underlying memory. // Figure out maximal valid memcpy alignment. 
@@ -1685,6 +1728,8 @@ } *ShadowPtr = getCleanShadow(V); } else { + // Shadow over TLS + Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); if (Overflow) { // ParamTLS overflow. *ShadowPtr = getCleanShadow(V); @@ -1703,7 +1748,9 @@ setOrigin(A, getCleanOrigin()); } } - ArgOffset += alignTo(Size, kShadowTLSAlignment); + + if (!FArgCheck) + ArgOffset += alignTo(Size, kShadowTLSAlignment); } assert(*ShadowPtr && "Could not find shadow for an argument"); return *ShadowPtr; @@ -3394,7 +3441,16 @@ << " Shadow: " << *ArgShadow << "\n"); bool ArgIsInitialized = false; const DataLayout &DL = F.getParent()->getDataLayout(); - if (CB.paramHasAttr(i, Attribute::ByVal)) { + + bool ByVal = CB.paramHasAttr(i, Attribute::ByVal); + bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef); + bool Check = ClEagerChecks && !ByVal && NoUndef; + + if (Check) { + insertShadowCheck(A, &CB); + continue; + } else if (ByVal) { + // ByVal requires some special handling as it's too big for a single load assert(A->getType()->isPointerTy() && "ByVal argument is not a pointer!"); Size = DL.getTypeAllocSize(CB.getParamByValType(i)); @@ -3412,10 +3468,12 @@ Alignment, Size); // TODO(glider): need to copy origins. } else { + // Any other parameters mean we need bit-grained tracking of uninit data Size = DL.getTypeAllocSize(A->getType()); if (ArgOffset + Size > kParamTLSSize) break; Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, kShadowTLSAlignment); + Constant *Cst = dyn_cast(ArgShadow); if (Cst && Cst->isNullValue()) ArgIsInitialized = true; } @@ -3440,6 +3498,13 @@ // Don't emit the epilogue for musttail call returns. if (isa(CB) && cast(CB).isMustTailCall()) return; + + if (ClEagerChecks && CB.hasRetAttr(Attribute::NoUndef)) { + setShadow(&CB, getCleanShadow(&CB)); + setOrigin(&CB, getCleanOrigin()); + return; + } + IRBuilder<> IRBBefore(&CB); // Until we have full dynamic coverage, make sure the retval shadow is 0. 
Value *Base = getShadowPtrForRetval(&CB, IRBBefore); @@ -3492,14 +3557,21 @@ // Don't emit the epilogue for musttail call returns. if (isAMustTailRetVal(RetVal)) return; Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB); - if (CheckReturnValue) { + bool GlobalChecks = ClEagerChecks && + F.hasAttribute(AttributeList::ReturnIndex, Attribute::NoUndef); + + Value *Shadow = getShadow(RetVal); + bool StoreOrigin = true; + if (CheckReturnValue || GlobalChecks) { insertShadowCheck(RetVal, &I); - Value *Shadow = getCleanShadow(RetVal); - IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); - } else { - Value *Shadow = getShadow(RetVal); + Shadow = getCleanShadow(RetVal); + StoreOrigin = false; + } + + // The caller still expects information passed over TLS if we pass our check + if (!GlobalChecks) { IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); - if (MS.TrackOrigins) + if (MS.TrackOrigins && StoreOrigin) IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB)); } } diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll @@ -0,0 +1,114 @@ +; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -msan-eager-checks -S -passes='module(msan-module),function(msan)' 2>&1 | \ +; RUN: FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK,CHECK-ORIGINS %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null } + +; Check the presence and the linkage type of __msan_track_origins and +; other interface symbols. 
+; CHECK-NOT: @__msan_track_origins +; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1 +; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0 +; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32 +; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64 +; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32 + + +define noundef i32 @NormalRet() nounwind uwtable sanitize_memory { + ret i32 123 +} + +; CHECK-LABEL: @NormalRet +; CHECK: ret i32 + +define i32 @PartialRet() nounwind uwtable sanitize_memory { + ret i32 123 +} + +; CHECK-LABEL: @PartialRet +; CHECK: store i32 0,{{.*}}__msan_retval_tls +; CHECK-NOT: call void @__msan_warning_noreturn() +; CHECK: ret i32 + +define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + %o = load i32, i32 *%p + ret i32 %o +} + +; CHECK-LABEL: @LoadedRet +; CHECK-NOT: __msan_retval_tls +; CHECK: call void @__msan_warning_noreturn() +; CHECK: unreachable +; CHECK-NOT: __msan_retval_tls +; CHECK: ret i32 %o + + +define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + store i32 %a, i32 *%p + ret void +} + +; CHECK-LABEL: @NormalArg +; CHECK-NOT: __msan_retval_tls +; CHECK: %p = inttoptr +; CHECK: ret void + +define void @PartialArg(i32 %a) nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + store i32 %a, i32 *%p + ret void +} + +; CHECK-LABEL: @PartialArg +; CHECK: load {{.*}}__msan_param_tls +; CHECK: %p = inttoptr +; CHECK: ret void + +define void @CallNormal() nounwind uwtable sanitize_memory { + 
%r = call i32 @NormalRet() nounwind uwtable sanitize_memory + call void @NormalArg(i32 %r) nounwind uwtable sanitize_memory + ret void +} + +; CHECK-LABEL: @CallNormal +; CHECK: call i32 @NormalRet() +; CHECK-NOT: __msan_{{\w+}}_tls +; CHECK: call void @NormalArg +; CHECK: ret void + +define void @CallWithLoaded() nounwind uwtable sanitize_memory { + %p = inttoptr i64 0 to i32 * + %o = load i32, i32 *%p + call void @NormalArg(i32 %o) nounwind uwtable sanitize_memory + ret void +} + +; CHECK-LABEL: @CallWithLoaded +; CHECK: %p = inttoptr +; CHECK-NOT: __msan_{{\w+}}_tls +; CHECK: call void @__msan_warning_noreturn() +; CHECK: unreachable +; CHECK-NOT: __msan_{{\w+}}_tls +; CHECK: call void @NormalArg +; CHECK: ret void + +define void @CallPartial() nounwind uwtable sanitize_memory { + %r = call i32 @PartialRet() nounwind uwtable sanitize_memory + call void @PartialArg(i32 %r) nounwind uwtable sanitize_memory + ret void +} + +; CHECK-LABEL: @CallPartial +; CHECK: call i32 @PartialRet() +; CHECK: load {{.*}}__msan_retval_tls +; CHECK: store {{.*}}__msan_param_tls +; CHECK: call void @PartialArg +; CHECK: ret void