diff --git a/compiler-rt/test/msan/libatomic.c b/compiler-rt/test/msan/libatomic.c
new file mode 100644
--- /dev/null
+++ b/compiler-rt/test/msan/libatomic.c
@@ -0,0 +1,37 @@
+// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_STORE -O0 %s -o %t && %run %t 2>&1
+// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SHADOW
+
+#include <sanitizer/msan_interface.h>
+#include <stdint.h>
+
+typedef struct __attribute__((packed)) {
+  uint8_t val[3];
+} i24;
+
+void copy(i24 *dst, i24 *src);
+
+int main() {
+  i24 uninit;
+  i24 init = {0};
+
+  __msan_check_mem_is_initialized(&init, 3);
+  copy(&init, &uninit);
+  __msan_check_mem_is_initialized(&init, 3);
+}
+
+void copy(i24 *dst, i24 *src) {
+#ifdef TEST_LOAD
+  __atomic_load(src, dst, __ATOMIC_RELAXED);
+
+  // CHECK: MemorySanitizer: use-of-uninitialized-value
+  // CHECK: #0 {{0x[a-f0-9]+}} in main{{.*}}libatomic.c:[[@LINE-8]]
+
+  // CHECK-SHADOW: Uninitialized value was stored to memory at
+  // CHECK-SHADOW: #0 {{0x[a-f0-9]+}} in copy{{.*}}libatomic.c:[[@LINE+6]]
+#endif
+#ifdef TEST_STORE
+  // Store always writes a clean shadow
+  __atomic_store(src, dst, __ATOMIC_RELAXED);
+#endif
+}
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.def b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
@@ -262,6 +262,12 @@
 /// long double __atanhl_finite(long double x);
 TLI_DEFINE_ENUM_INTERNAL(atanhl_finite)
 TLI_DEFINE_STRING_INTERNAL("__atanhl_finite")
+/// void __atomic_load(size_t size, void *mptr, void *vptr, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_load)
+TLI_DEFINE_STRING_INTERNAL("__atomic_load")
+/// void __atomic_store(size_t size, void *mptr, void *vptr, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_store)
+TLI_DEFINE_STRING_INTERNAL("__atomic_store")
 /// double __cosh_finite(double x);
 TLI_DEFINE_ENUM_INTERNAL(cosh_finite)
 TLI_DEFINE_STRING_INTERNAL("__cosh_finite")
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -1228,6 +1228,15 @@
   case LibFunc_ZdaPvmSt11align_val_t:
     return (NumParams == 3 && FTy.getParamType(0)->isPointerTy());
 
+  case LibFunc_atomic_load:
+    // void __atomic_load(size_t, void *, void *, int)
+  case LibFunc_atomic_store:
+    // void __atomic_store(size_t, void *, void *, int)
+    return (NumParams == 4 && FTy.getParamType(0)->isIntegerTy() &&
+            FTy.getParamType(1)->isPointerTy() &&
+            FTy.getParamType(2)->isPointerTy() &&
+            FTy.getParamType(3)->isIntegerTy());
+
   case LibFunc_memset_pattern16:
     return (!FTy.isVarArg() && NumParams == 3 &&
             FTy.getParamType(0)->isPointerTy() &&
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -571,6 +571,9 @@
   /// uninitialized value and returns an updated origin id encoding this info.
   FunctionCallee MsanChainOriginFn;
 
+  /// Run-time helper that paints an origin over a region.
+  FunctionCallee MsanSetOriginFn;
+
   /// MSan runtime replacements for memmove, memcpy and memset.
   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
@@ -849,6 +852,9 @@
   // instrumentation.
   MsanChainOriginFn = M.getOrInsertFunction(
       "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
+  MsanSetOriginFn =
+      M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(),
+                            IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
   MemmoveFn = M.getOrInsertFunction(
       "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
       IRB.getInt8PtrTy(), IntptrTy);
@@ -1851,6 +1857,24 @@
     llvm_unreachable("Unknown ordering");
   }
 
+  Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
+    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
+    uint32_t OrderingTable[NumOrderings] = {};
+
+    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
+        OrderingTable[(int)AtomicOrderingCABI::release] =
+            (int)AtomicOrderingCABI::release;
+    OrderingTable[(int)AtomicOrderingCABI::consume] =
+        OrderingTable[(int)AtomicOrderingCABI::acquire] =
+            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
+                (int)AtomicOrderingCABI::acq_rel;
+    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
+        (int)AtomicOrderingCABI::seq_cst;
+
+    return ConstantDataVector::get(IRB.getContext(),
+                                   makeArrayRef(OrderingTable, NumOrderings));
+  }
+
   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
     switch (a) {
     case AtomicOrdering::NotAtomic:
@@ -1868,6 +1892,24 @@
     llvm_unreachable("Unknown ordering");
   }
 
+  Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
+    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
+    uint32_t OrderingTable[NumOrderings] = {};
+
+    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
+        OrderingTable[(int)AtomicOrderingCABI::acquire] =
+            OrderingTable[(int)AtomicOrderingCABI::consume] =
+                (int)AtomicOrderingCABI::acquire;
+    OrderingTable[(int)AtomicOrderingCABI::release] =
+        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
+            (int)AtomicOrderingCABI::acq_rel;
+    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
+        (int)AtomicOrderingCABI::seq_cst;
+
+    return ConstantDataVector::get(IRB.getContext(),
+                                   makeArrayRef(OrderingTable, NumOrderings));
+  }
+
   // ------------------- Visitors.
   using InstVisitor<MemorySanitizerVisitor>::visit;
   void visit(Instruction &I) {
@@ -3486,6 +3528,63 @@
     }
   }
+
+  void visitLibAtomicLoad(CallBase &CB) {
+    IRBuilder<> IRB(&CB);
+    Value *Size = CB.getArgOperand(0);
+    Value *SrcPtr = CB.getArgOperand(1);
+    Value *DstPtr = CB.getArgOperand(2);
+    Value *Ordering = CB.getArgOperand(3);
+    // Convert the call to have at least Acquire ordering to make sure
+    // the shadow operations aren't reordered before it.
+    Value *NewOrdering =
+        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
+    CB.setArgOperand(3, NewOrdering);
+
+    IRBuilder<> NextIRB(CB.getNextNode());
+    Align AlignOne = assumeAligned(1);
+    auto SrcShadowOriginPair =
+        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), AlignOne,
+                           /*isStore*/ false);
+    auto DstShadowPtr =
+        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), AlignOne,
+                           /*isStore*/ false)
+            .first;
+
+    NextIRB.CreateMemCpy(DstShadowPtr, AlignOne, SrcShadowOriginPair.first,
+                         AlignOne, Size);
+    if (MS.TrackOrigins) {
+      Value *SrcOrigin = NextIRB.CreateAlignedLoad(
+          MS.OriginTy, SrcShadowOriginPair.second, kMinOriginAlignment);
+      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
+      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
+    }
+  }
+
+  void visitLibAtomicStore(CallBase &CB) {
+    IRBuilder<> IRB(&CB);
+    Value *Size = CB.getArgOperand(0);
+    Value *DstPtr = CB.getArgOperand(2);
+    Value *Ordering = CB.getArgOperand(3);
+    // Convert the call to have at least Release ordering to make sure
+    // the shadow operations aren't reordered after it.
+    Value *NewOrdering =
+        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
+    CB.setArgOperand(3, NewOrdering);
+
+    Align AlignOne = assumeAligned(1);
+    auto DstShadowOriginPair =
+        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), AlignOne,
+                           /*isStore*/ false);
+
+    // Atomic store always paints clean shadow/origin. See file header.
+    IRB.CreateMemSet(DstShadowOriginPair.first, getCleanShadow(IRB.getInt8Ty()),
+                     Size, AlignOne);
+    if (MS.TrackOrigins) {
+      Value *SrcOrigin = getCleanOrigin();
+      IRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, SrcOrigin});
+    }
+  }
+
   void visitCallBase(CallBase &CB) {
     assert(!CB.getMetadata("nosanitize"));
     if (CB.isInlineAsm()) {
@@ -3499,6 +3598,23 @@
       visitInstruction(CB);
       return;
     }
+    LibFunc LF;
+    if (TLI->getLibFunc(CB, LF)) {
+      // libatomic.a functions need to have special handling because there isn't
+      // a good way to intercept them or compile the library with
+      // instrumentation.
+      switch (LF) {
+      case LibFunc_atomic_load:
+        visitLibAtomicLoad(CB);
+        return;
+      case LibFunc_atomic_store:
+        visitLibAtomicStore(CB);
+        return;
+      default:
+        break;
+      }
+    }
+
     if (auto *Call = dyn_cast<CallInst>(&CB)) {
       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
diff --git a/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll b/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll
@@ -0,0 +1,70 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK,CHECK-ORIGIN
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @__atomic_load(i64, i8*, i8*, i32)
+declare void @__atomic_store(i64, i8*, i8*, i32)
+
+define i24 @odd_sized_load(i24* %ptr) sanitize_memory {
+; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]])
+; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
+; CHECK-ORIGIN: @__msan_set_alloca_origin
+; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
+; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
+; CHECK: call void @__atomic_load(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 2)
+
+; CHECK: ptrtoint i8* [[PTR_I8]]
+; CHECK: xor
+; CHECK: [[SPTR_I8:%.*]] = inttoptr
+; CHECK-ORIGIN: add
+; CHECK-ORIGIN: and
+; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr
+
+; CHECK: ptrtoint i8* [[VAL_PTR_I8]]
+; CHECK: xor
+; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr
+; CHECK-ORIGIN: add
+; CHECK-ORIGIN: and
+; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr
+
+; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 3
+
+; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]]
+; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]])
+; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 3, i32 [[VAL_ORIGIN]])
+
+; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]]
+; CHECK: ret i24 [[VAL]]
+  %val_ptr = alloca i24, align 1
+  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
+  %ptr_i8 = bitcast i24* %ptr to i8*
+  call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0)
+  %val = load i24, i24* %val_ptr
+  ret i24 %val
+}
+
+define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory {
+; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
+; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
+; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]]
+; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
+; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
+
+; CHECK: ptrtoint i8* [[PTR_I8]]
+; CHECK: xor
+; CHECK: [[SPTR_I8:%.*]] = inttoptr
+; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3
+; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[PTR_I8]], i64 3, i32 0)
+
+; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3)
+; CHECK: ret void
+  %val_ptr = alloca i24, align 1
+  store i24 %val, i24* %val_ptr
+  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
+  %ptr_i8 = bitcast i24* %ptr to i8*
+  call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0)
+  ret void
+}
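Note (not part of the diff above): the __atomic_load/__atomic_store library calls that this patch teaches TargetLibraryInfo and MSan to recognize are what the compiler emits when an atomic object has no lock-free width, which is why the runtime test links with -latomic. Below is a hedged, minimal C sketch of that situation; the file name in the build comment is illustrative, and the 3-byte i24 type simply mirrors the compiler-rt test above.

// Hedged sketch: a 3-byte type has no native atomic width, so GCC/Clang
// lower the generic __atomic_load builtin below to a call into libatomic's
// __atomic_load library function.
// Build (assumed): clang example.c -latomic
#include <stdint.h>
#include <string.h>

typedef struct __attribute__((packed)) {
  uint8_t val[3];
} i24; // same 3-byte type as in the compiler-rt test above

int main(void) {
  i24 src = {{1, 2, 3}};
  i24 dst;

  // Atomically copies *(&src) into *(&dst); for this size the compiler
  // emits a libatomic call instead of an inline atomic instruction.
  __atomic_load(&src, &dst, __ATOMIC_ACQUIRE);

  return memcmp(&src, &dst, sizeof dst) != 0; // 0 on success
}

As the patch's comments explain, MSan also strengthens the ordering argument of these calls (to at least acquire for loads and at least release for stores) so that the shadow memcpy/memset it inserts around the call cannot be reordered across the atomic operation.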