diff --git a/compiler-rt/test/msan/libatomic.c b/compiler-rt/test/msan/libatomic.c new file mode 100644 --- /dev/null +++ b/compiler-rt/test/msan/libatomic.c @@ -0,0 +1,42 @@ +// RUN: %clang_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_STORE -O0 %s -o %t && %run %t 2>&1 +// RUN: %clang_msan -fsanitize-memory-track-origins=0 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clang_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SHADOW + +// PPC has no libatomic +// UNSUPPORTED: powerpc64-target-arch +// UNSUPPORTED: powerpc64le-target-arch + +#include <sanitizer/msan_interface.h> +#include <stdint.h> + +typedef struct __attribute((packed)) { + uint8_t val[3]; +} i24; + +void copy(i24 *dst, i24 *src); + +int main() { + i24 uninit; + i24 init = {0}; + + __msan_check_mem_is_initialized(&init, 3); + copy(&init, &uninit); + __msan_check_mem_is_initialized(&init, 3); +} + +void copy(i24 *dst, i24 *src) { +#ifdef TEST_LOAD + __atomic_load(src, dst, __ATOMIC_RELAXED); + + // CHECK: MemorySanitizer: use-of-uninitialized-value + // CHECK: #0 {{0x[a-f0-9]+}} in main{{.*}}libatomic.c:[[@LINE-8]] + + // CHECK-SHADOW: Uninitialized value was stored to memory at + // CHECK-SHADOW: #0 {{0x[a-f0-9]+}} in msan.atomic_load.thunk + // CHECK-SHADOW: #1 {{0x[a-f0-9]+}} in copy{{.*}}libatomic.c:[[@LINE-7]] +#endif +#ifdef TEST_STORE + // Store always writes a clean shadow + __atomic_store(src, dst, __ATOMIC_RELAXED); +#endif +} diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -339,6 +339,7 @@ cl::Hidden, cl::init(0)); static const char *const kMsanModuleCtorName = "msan.module_ctor"; +static const char *const kMsanLibAtomicLoadThunkName = "msan.atomic_load.thunk"; 
static const char *const kMsanInitName = "__msan_init"; namespace { @@ -573,6 +574,9 @@ /// uninitialized value and returns an updated origin id encoding this info. FunctionCallee MsanChainOriginFn; + /// Run-time helper that paints an origin over a region. + FunctionCallee MsanSetOriginFn; + /// MSan runtime replacements for memmove, memcpy and memset. FunctionCallee MemmoveFn, MemcpyFn, MemsetFn; @@ -851,6 +855,9 @@ // instrumentation. MsanChainOriginFn = M.getOrInsertFunction( "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty()); + MsanSetOriginFn = + M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(), + IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty()); MemmoveFn = M.getOrInsertFunction( "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy); @@ -1818,6 +1825,24 @@ llvm_unreachable("Unknown ordering"); } + Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) { + constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1; + uint32_t OrderingTable[NumOrderings] = {}; + + OrderingTable[(int)AtomicOrderingCABI::relaxed] = + OrderingTable[(int)AtomicOrderingCABI::release] = + (int)AtomicOrderingCABI::release; + OrderingTable[(int)AtomicOrderingCABI::consume] = + OrderingTable[(int)AtomicOrderingCABI::acquire] = + OrderingTable[(int)AtomicOrderingCABI::acq_rel] = + (int)AtomicOrderingCABI::acq_rel; + OrderingTable[(int)AtomicOrderingCABI::seq_cst] = + (int)AtomicOrderingCABI::seq_cst; + + return ConstantDataVector::get(IRB.getContext(), + makeArrayRef(OrderingTable, NumOrderings)); + } + AtomicOrdering addAcquireOrdering(AtomicOrdering a) { switch (a) { case AtomicOrdering::NotAtomic: @@ -1835,6 +1860,24 @@ llvm_unreachable("Unknown ordering"); } + Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) { + constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1; + uint32_t OrderingTable[NumOrderings] = {}; + + OrderingTable[(int)AtomicOrderingCABI::relaxed] = + OrderingTable[(int)AtomicOrderingCABI::acquire] 
= + OrderingTable[(int)AtomicOrderingCABI::consume] = + (int)AtomicOrderingCABI::acquire; + OrderingTable[(int)AtomicOrderingCABI::release] = + OrderingTable[(int)AtomicOrderingCABI::acq_rel] = + (int)AtomicOrderingCABI::acq_rel; + OrderingTable[(int)AtomicOrderingCABI::seq_cst] = + (int)AtomicOrderingCABI::seq_cst; + + return ConstantDataVector::get(IRB.getContext(), + makeArrayRef(OrderingTable, NumOrderings)); + } + // ------------------- Visitors. using InstVisitor<MemorySanitizerVisitor>::visit; void visit(Instruction &I) { @@ -3451,6 +3494,88 @@ } } + Function *createLibAtomicLoadThunk(CallBase &CB) { + Module *M = CB.getModule(); + Type *Void = Type::getVoidTy(M->getContext()); + if (Function *Thunk = M->getFunction(kMsanLibAtomicLoadThunkName)) + return Thunk; + + Type *SizeType = CB.getArgOperand(0)->getType(); + Type *SrcPtrType = CB.getArgOperand(1)->getType(); + Type *DstPtrType = CB.getArgOperand(2)->getType(); + Type *OrderingType = CB.getArgOperand(3)->getType(); + + FunctionType *ThunkTy = FunctionType::get(Void, {SizeType, SrcPtrType, DstPtrType, OrderingType}, false); + Function *Thunk = Function::Create(ThunkTy, GlobalValue::LinkOnceAnyLinkage, kMsanLibAtomicLoadThunkName, *M); + Thunk->addFnAttr("frame-pointer", "all"); + BasicBlock *BB = BasicBlock::Create(M->getContext(), "", Thunk); + + Value *Size = Thunk->getArg(0); + Value *SrcPtr = Thunk->getArg(1); + Value *DstPtr = Thunk->getArg(2); + Value *Ordering = Thunk->getArg(3); + + IRBuilder<> IRB(BB); + // IRB.SetCurrentDebugLocation(CB.getDebugLoc()); + + // Convert the call to have at least Acquire ordering to make sure + // the shadow operations aren't reordered before it. 
+ Value *NewOrdering = + IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering); + + FunctionCallee LibAtomicLoad(CB.getCalledFunction()); + IRB.CreateCall(LibAtomicLoad, {Size, SrcPtr, DstPtr, NewOrdering}); + + Value *SrcShadowPtr, *SrcOriginPtr; + std::tie(SrcShadowPtr, SrcOriginPtr) = + getShadowOriginPtr(SrcPtr, IRB, IRB.getInt8Ty(), Align(1), + /*isStore*/ false); + Value *DstShadowPtr = + getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1), + /*isStore*/ true) + .first; + + IRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size); + if (MS.TrackOrigins) { + Value *SrcOrigin = IRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr, + kMinOriginAlignment); + Value *NewOrigin = updateOrigin(SrcOrigin, IRB); + IRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin}); + } + + IRB.CreateRetVoid(); + return Thunk; + } + + void visitLibAtomicLoad(CallBase &CB) { + // Have to create a thunk call for this, since it's possible the caller + // will invoke the load with no succeeding block. But we still have to + // find a way to put instrumentation after the call. + FunctionCallee ThunkCallee(createLibAtomicLoadThunk(CB)); + CB.setCalledFunction(ThunkCallee); + } + + void visitLibAtomicStore(CallBase &CB) { + IRBuilder<> IRB(&CB); + Value *Size = CB.getArgOperand(0); + Value *DstPtr = CB.getArgOperand(2); + Value *Ordering = CB.getArgOperand(3); + // Convert the call to have at least Release ordering to make sure + // the shadow operations aren't reordered after it. + Value *NewOrdering = + IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering); + CB.setArgOperand(3, NewOrdering); + + Value *DstShadowPtr = + getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1), + /*isStore*/ true) + .first; + + // Atomic store always paints clean shadow/origin. See file header. 
+ IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size, + Align(1)); + } + void visitCallBase(CallBase &CB) { assert(!CB.getMetadata("nosanitize")); if (CB.isInlineAsm()) { @@ -3464,6 +3589,23 @@ visitInstruction(CB); return; } + LibFunc LF; + if (TLI->getLibFunc(CB, LF)) { + // libatomic.a functions need to have special handling because there isn't + // a good way to intercept them or compile the library with + // instrumentation. + switch (LF) { + case LibFunc_atomic_load: + visitLibAtomicLoad(CB); + return; + case LibFunc_atomic_store: + visitLibAtomicStore(CB); + return; + default: + break; + } + } + if (auto *Call = dyn_cast<CallInst>(&CB)) { assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere"); @@ -5155,6 +5297,8 @@ bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) { if (!CompileKernel && F.getName() == kMsanModuleCtorName) return false; + if (F.getName() == kMsanLibAtomicLoadThunkName) + return false; MemorySanitizerVisitor Visitor(F, *this, TLI); diff --git a/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll b/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll @@ -0,0 +1,71 @@ +; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s +; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK,CHECK-ORIGIN +; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +declare void @__atomic_load(i64, i8*, i8*, i32) +declare void @__atomic_store(i64, i8*, i8*, i32) + +define i24 @odd_sized_load(i24* %ptr) sanitize_memory { +; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]]) +; CHECK: [[VAL_PTR:%.*]] = alloca i24, 
align 1 +; CHECK-ORIGIN: @__msan_set_alloca_origin +; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8* +; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8* +; CHECK: call void @msan.atomic_load.thunk(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 0) +; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]] +; CHECK: ret i24 [[VAL]] + %val_ptr = alloca i24, align 1 + %val_ptr_i8 = bitcast i24* %val_ptr to i8* + %ptr_i8 = bitcast i24* %ptr to i8* + call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0) + %val = load i24, i24* %val_ptr + ret i24 %val +} + +define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory { +; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]]) +; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1 +; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]] +; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8* +; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8* + +; CHECK: ptrtoint i8* [[PTR_I8]] +; CHECK: xor +; CHECK: [[SPTR_I8:%.*]] = inttoptr +; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3 + +; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3) +; CHECK: ret void + %val_ptr = alloca i24, align 1 + store i24 %val, i24* %val_ptr + %val_ptr_i8 = bitcast i24* %val_ptr to i8* + %ptr_i8 = bitcast i24* %ptr to i8* + call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0) + ret void +} + +; CHECK: @msan.atomic_load.thunk(i64 [[LEN:%.*]], i8* [[PTR_I8:%.*]], i8* [[VAL_PTR_I8:%.*]], i32 [[ORDERING:%.*]]) +; CHECK: [[NEW_ORDERING:%.*]] = extractelement {{.*}}, i32 [[ORDERING]] +; CHECK: call void @__atomic_load(i64 [[LEN]], i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 [[NEW_ORDERING]]) +; CHECK: ptrtoint i8* [[PTR_I8]] +; CHECK: xor +; CHECK: [[SPTR_I8:%.*]] = inttoptr +; CHECK-ORIGIN: add +; CHECK-ORIGIN: and +; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr + +; CHECK: ptrtoint i8* [[VAL_PTR_I8]] +; CHECK: xor +; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr 
+; CHECK-ORIGIN: add +; CHECK-ORIGIN: and +; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr + +; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 [[LEN]] + +; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]] +; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]]) +; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 [[LEN]], i32 [[VAL_ORIGIN]]) +; CHECK: ret void