diff --git a/compiler-rt/test/dfsan/atomic.cpp b/compiler-rt/test/dfsan/atomic.cpp
new file
--- /dev/null
+++ b/compiler-rt/test/dfsan/atomic.cpp
@@ -0,0 +1,45 @@
+// RUN: %clangxx_dfsan %s -fno-exceptions -o %t && %run %t
+// RUN: %clangxx_dfsan -mllvm -dfsan-track-origins=1 -mllvm -dfsan-fast-16-labels=true %s -fno-exceptions -o %t && %run %t
+//
+// Use -fno-exceptions to turn off exceptions to avoid instrumenting
+// __cxa_begin_catch, std::terminate and __gxx_personality_v0.
+//
+// TODO: Support builtin atomics. For example, https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
+// DFSan instrumentation pass cannot identify builtin callsites yet.
+
+#include <sanitizer/dfsan_interface.h>
+
+#include <assert.h>
+#include <atomic>
+#include <pthread.h>
+
+std::atomic<int> atomic_i{0};
+
+static void *ThreadFn(void *arg) {
+  if ((size_t)arg % 2) {
+    int i = 10;
+    dfsan_set_label(8, (void *)&i, sizeof(i));
+    atomic_i.store(i, std::memory_order_relaxed);
+
+    return 0;
+  }
+  int j = atomic_i.load();
+  assert(dfsan_get_label(j) == 0 || dfsan_get_label(j) == 2);
+
+  return 0;
+}
+
+int main(void) {
+  int i = 10;
+  dfsan_set_label(2, (void *)&i, sizeof(i));
+  atomic_i.store(i, std::memory_order_relaxed);
+  const int kNumThreads = 24;
+  pthread_t t[kNumThreads];
+  for (int i = 0; i < kNumThreads; ++i) {
+    pthread_create(&t[i], 0, ThreadFn, (void *)i);
+  }
+  for (int i = 0; i < kNumThreads; ++i) {
+    pthread_join(t[i], 0);
+  }
+  return 0;
+}
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -593,6 +593,11 @@
   /// CTP(other types, PS) = PS
   Value *collapseToPrimitiveShadow(Value *Shadow, Instruction *Pos);
 
+  void storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, Align ShadowAlign,
+                                Instruction *Pos);
+
+  Align getShadowAlign(Align InstAlignment);
+
 private:
   /// Collapses the shadow with aggregate type into a single primitive shadow
   /// value.
@@ -634,6 +639,8 @@
   void visitGetElementPtrInst(GetElementPtrInst &GEPI);
   void visitLoadInst(LoadInst &LI);
   void visitStoreInst(StoreInst &SI);
+  void visitAtomicRMWInst(AtomicRMWInst &I);
+  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
   void visitReturnInst(ReturnInst &RI);
   void visitCallBase(CallBase &CB);
   void visitPHINode(PHINode &PN);
@@ -648,6 +655,8 @@
   void visitMemTransferInst(MemTransferInst &I);
 
 private:
+  void visitCASOrRMW(Align InstAlignment, Instruction &I);
+
   // Returns false when this is an invoke of a custom function.
   bool visitWrappedCallBase(Function &F, CallBase &CB);
 
@@ -1801,6 +1810,11 @@
   DFSF.setOrigin(&I, CombinedOrigin);
 }
 
+Align DFSanFunction::getShadowAlign(Align InstAlignment) {
+  const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1);
+  return Align(Alignment.value() * DFS.ShadowWidthBytes);
+}
+
 Value *DFSanFunction::loadFast16ShadowFast(Value *ShadowAddr, uint64_t Size,
                                            Align ShadowAlign,
                                            Instruction *Pos) {
@@ -1958,6 +1972,23 @@
   return FallbackCall;
 }
 
+static AtomicOrdering addAcquireOrdering(AtomicOrdering AO) {
+  switch (AO) {
+  case AtomicOrdering::NotAtomic:
+    return AtomicOrdering::NotAtomic;
+  case AtomicOrdering::Unordered:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Acquire:
+    return AtomicOrdering::Acquire;
+  case AtomicOrdering::Release:
+  case AtomicOrdering::AcquireRelease:
+    return AtomicOrdering::AcquireRelease;
+  case AtomicOrdering::SequentiallyConsistent:
+    return AtomicOrdering::SequentiallyConsistent;
+  }
+  llvm_unreachable("Unknown ordering");
+}
+
 void DFSanVisitor::visitLoadInst(LoadInst &LI) {
   auto &DL = LI.getModule()->getDataLayout();
   uint64_t Size = DL.getTypeStoreSize(LI.getType());
@@ -1966,26 +1997,49 @@
     return;
   }
 
+  // When an application load is atomic, increase atomic ordering between
+  // atomic application loads and stores to ensure a happens-before order; load
+  // shadow data after application data; store zero shadow data before
+  // application data. This ensures that shadow loads return either labels of
+  // the initial application data or zeros.
+  if (LI.isAtomic())
+    LI.setOrdering(addAcquireOrdering(LI.getOrdering()));
+
   Align Alignment = ClPreserveAlignment ? LI.getAlign() : Align(1);
+  Instruction *Pos = LI.isAtomic() ? LI.getNextNode() : &LI;
   Value *PrimitiveShadow =
-      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
+      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), Pos);
   if (ClCombinePointerLabelsOnLoad) {
     Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
-    PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, &LI);
+    PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, Pos);
   }
   if (!DFSF.DFS.isZeroShadow(PrimitiveShadow))
     DFSF.NonZeroChecks.push_back(PrimitiveShadow);
 
   Value *Shadow =
-      DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, &LI);
+      DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, Pos);
   DFSF.setShadow(&LI, Shadow);
   if (ClEventCallbacks) {
-    IRBuilder<> IRB(&LI);
+    IRBuilder<> IRB(Pos);
     Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
     IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
   }
 }
 
+void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
+                                             Align ShadowAlign,
+                                             Instruction *Pos) {
+  IRBuilder<> IRB(Pos);
+  IntegerType *ShadowTy =
+      IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
+  Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
+  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
+  Value *ExtShadowAddr =
+      IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
+  IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
+  // Do not write origins for 0 shadows because we do not trace origins for
+  // untainted sinks.
+}
+
 void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
                                          Align Alignment,
                                          Value *PrimitiveShadow,
@@ -2000,18 +2054,13 @@
   }
 
   const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
-  IRBuilder<> IRB(Pos);
-  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
   if (DFS.isZeroShadow(PrimitiveShadow)) {
-    IntegerType *ShadowTy =
-        IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
-    Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
-    Value *ExtShadowAddr =
-        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
-    IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
+    storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos);
     return;
   }
 
+  IRBuilder<> IRB(Pos);
+  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
   const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
   uint64_t Offset = 0;
   if (Size >= ShadowVecSize) {
@@ -2043,15 +2092,42 @@
   }
 }
 
+static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {
+  switch (AO) {
+  case AtomicOrdering::NotAtomic:
+    return AtomicOrdering::NotAtomic;
+  case AtomicOrdering::Unordered:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Release:
+    return AtomicOrdering::Release;
+  case AtomicOrdering::Acquire:
+  case AtomicOrdering::AcquireRelease:
+    return AtomicOrdering::AcquireRelease;
+  case AtomicOrdering::SequentiallyConsistent:
+    return AtomicOrdering::SequentiallyConsistent;
+  }
+  llvm_unreachable("Unknown ordering");
+}
+
 void DFSanVisitor::visitStoreInst(StoreInst &SI) {
   auto &DL = SI.getModule()->getDataLayout();
-  uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
+  Value *Val = SI.getValueOperand();
+  uint64_t Size = DL.getTypeStoreSize(Val->getType());
   if (Size == 0)
     return;
 
+  // When an application store is atomic, increase atomic ordering between
+  // atomic application loads and stores to ensure a happens-before order; load
+  // shadow data after application data; store zero shadow data before
+  // application data. This ensures that shadow loads return either labels of
+  // the initial application data or zeros.
+  if (SI.isAtomic())
+    SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
+
   const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);
-  Value* Shadow = DFSF.getShadow(SI.getValueOperand());
+  Value *Shadow =
+      SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val);
   Value *PrimitiveShadow;
   if (ClCombinePointerLabelsOnStore) {
     Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
@@ -2068,6 +2144,35 @@
   }
 }
 
+void DFSanVisitor::visitCASOrRMW(Align InstAlignment, Instruction &I) {
+  assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
+
+  Value *Val = I.getOperand(1);
+  const auto &DL = I.getModule()->getDataLayout();
+  uint64_t Size = DL.getTypeStoreSize(Val->getType());
+  if (Size == 0)
+    return;
+
+  // Conservatively set data at stored addresses and return with zero shadow to
+  // prevent shadow data races.
+ IRBuilder<> IRB(&I); + Value *Addr = I.getOperand(0); + const Align ShadowAlign = DFSF.getShadowAlign(InstAlignment); + DFSF.storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, &I); + DFSF.setShadow(&I, DFSF.DFS.getZeroShadow(&I)); + DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin); +} + +void DFSanVisitor::visitAtomicRMWInst(AtomicRMWInst &I) { + visitCASOrRMW(I.getAlign(), I); + I.setOrdering(addReleaseOrdering(I.getOrdering())); +} + +void DFSanVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { + visitCASOrRMW(I.getAlign(), I); + I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering())); +} + void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) { visitInstOperands(UO); } diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll b/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll @@ -0,0 +1,323 @@ +; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefix=CHECK +; +; The patterns about origins cannot be tested until the origin tracking feature is complete. + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +; atomicrmw xchg: store clean shadow/origin, return clean shadow/origin + +define i32 @AtomicRmwXchg(i32* %p, i32 %x) { +entry: + %0 = atomicrmw xchg i32* %p, i32 %x seq_cst + ret i32 %0 +} + +; CHECK-LABEL: @"dfs$AtomicRmwXchg" +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK-NOT: @__dfsan_arg_tls +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK-NEXT: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK-NEXT: atomicrmw xchg i32* %p, i32 %x seq_cst +; CHECK-NEXT: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i32 + + +; atomicrmw max: exactly the same as above + +define i32 @AtomicRmwMax(i32* %p, i32 %x) { +entry: + %0 = atomicrmw max i32* %p, i32 %x seq_cst + ret i32 %0 +} + +; CHECK-LABEL: @"dfs$AtomicRmwMax" +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK-NOT: @__dfsan_arg_tls +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK-NEXT: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK-NEXT: atomicrmw max i32* %p, i32 %x seq_cst +; CHECK-NEXT: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i32 + + +; cmpxchg: store clean shadow/origin, return clean shadow/origin + +define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) { +entry: + %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst + %0 = extractvalue { i32, i1 } %pair, 0 + ret i32 %0 +} + +; CHECK-LABEL: @"dfs$Cmpxchg" +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK-NOT: @__dfsan_arg_tls +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; 
CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK-NEXT: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK-NEXT: %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst +; CHECK: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i32 + + +; relaxed cmpxchg: bump up to "release monotonic" + +define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) { +entry: + %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic + %0 = extractvalue { i32, i1 } %pair, 0 + ret i32 %0 +} + +; CHECK-LABEL: @"dfs$CmpxchgMonotonic" +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK-NOT: @__dfsan_arg_tls +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK-NEXT: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK-NEXT: %pair = cmpxchg i32* %p, i32 %a, i32 %b release monotonic +; CHECK: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i32 + + +; atomic load: load shadow value after app value + +define i32 @AtomicLoad(i32* %p) { +entry: + %a = load atomic i32, i32* %p seq_cst, align 16 + ret i32 %a +} + +; CHECK-LABEL: @"dfs$AtomicLoad" +; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4 +; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2 +; CHECK: %a = load atomic i32, i32* %p seq_cst, align 16 +; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832 +; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32* +; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16 +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2 +; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32 +; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]] +; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16 +; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]] +; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16 +; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]] +; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0 +; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]] +; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4 +; CHECK: ret i32 %a + + +; atomic load: load shadow value after app value + +define i32 @AtomicLoadAcquire(i32* %p) { +entry: + %a = load atomic i32, i32* %p acquire, align 16 + ret 
i32 %a +} + +; CHECK-LABEL: @"dfs$AtomicLoadAcquire" +; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4 +; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2 +; CHECK: %a = load atomic i32, i32* %p acquire, align 16 +; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832 +; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32* +; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16 +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2 +; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32 +; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]] +; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16 +; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]] +; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16 +; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]] +; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0 +; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]] +; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4 +; CHECK: ret i32 %a + + +; atomic load monotonic: bump up to load acquire + +define i32 @AtomicLoadMonotonic(i32* %p) { +entry: + %a = load atomic i32, i32* %p monotonic, align 16 + ret i32 %a +} + +; CHECK-LABEL: @"dfs$AtomicLoadMonotonic" +; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4 +; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2 +; CHECK: %a = load atomic i32, i32* %p acquire, align 16 +; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832 +; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32* +; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16 +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2 +; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32 +; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]] +; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16 +; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]] +; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16 +; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]] +; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0 +; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]] +; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4 +; CHECK: ret i32 %a + + +; atomic load unordered: bump up to load acquire + 
+define i32 @AtomicLoadUnordered(i32* %p) { +entry: + %a = load atomic i32, i32* %p unordered, align 16 + ret i32 %a +} + +; CHECK-LABEL: @"dfs$AtomicLoadUnordered" +; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4 +; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2 +; CHECK: %a = load atomic i32, i32* %p acquire, align 16 +; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832 +; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32* +; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16 +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2 +; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32 +; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]] +; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16 +; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]] +; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16 +; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]] +; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0 +; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]] +; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2 +; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4 +; CHECK: ret i32 %a + + +; atomic store: store clean shadow value before app value + +define void @AtomicStore(i32* %p, i32 %x) { +entry: + store atomic i32 %x, i32* %p seq_cst, align 16 + ret void +} + +; CHECK-LABEL: @"dfs$AtomicStore" +; CHECK-NOT: @__dfsan_arg_tls +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK_ORIGIN-NOT: 35184372088832 +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16 +; CHECK: ret void + + +; atomic store: store clean shadow value before app value + +define void @AtomicStoreRelease(i32* %p, i32 %x) { +entry: + store atomic i32 %x, i32* %p release, align 16 + ret void +} + +; CHECK-LABEL: @"dfs$AtomicStoreRelease" +; CHECK-NOT: @__dfsan_arg_tls +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK_ORIGIN-NOT: 35184372088832 +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK: store atomic i32 %x, i32* %p release, align 16 +; CHECK: ret void + + +; atomic store monotonic: bumped up to store release + +define void @AtomicStoreMonotonic(i32* %p, i32 %x) { +entry: + store atomic i32 %x, i32* %p monotonic, align 16 + ret void +} + +; CHECK-LABEL: @"dfs$AtomicStoreMonotonic" +; CHECK-NOT: 
@__dfsan_arg_tls +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK_ORIGIN-NOT: 35184372088832 +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK: store atomic i32 %x, i32* %p release, align 16 +; CHECK: ret void + + +; atomic store unordered: bumped up to store release + +define void @AtomicStoreUnordered(i32* %p, i32 %x) { +entry: + store atomic i32 %x, i32* %p unordered, align 16 + ret void +} + +; CHECK-LABEL: @"dfs$AtomicStoreUnordered" +; CHECK-NOT: @__dfsan_arg_tls +; CHECK-NOT: @__dfsan_arg_origin_tls +; CHECK_ORIGIN-NOT: 35184372088832 +; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64 +; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913 +; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2 +; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16* +; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64* +; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2 +; CHECK: store atomic i32 %x, i32* %p release, align 16 +; CHECK: ret void
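
The intended end-to-end behavior of the instrumentation above can be illustrated with a small application-level sketch. This example is not part of the patch; it is a hypothetical program, assuming the fast-16-labels mode and default dfsan flags exercised by the tests above. Because atomic stores and RMWs write a zero shadow before the application data, and atomic loads read the shadow after the application data, a value read back from an atomic location is expected to carry no label:

// Hypothetical example (not in this patch); build roughly as in atomic.cpp:
//   clang++ -fsanitize=dataflow -mllvm -dfsan-fast-16-labels=true example.cpp
#include <sanitizer/dfsan_interface.h>

#include <assert.h>
#include <atomic>

std::atomic<int> cell{0};

int main() {
  int x = 42;
  dfsan_set_label(4, (void *)&x, sizeof(x)); // taint x with label 4
  // The instrumented atomic store writes a zero shadow for the stored value,
  // so the label of x is intentionally dropped here.
  cell.store(x, std::memory_order_release);
  // The instrumented atomic load reads the shadow after the application data.
  int y = cell.load(std::memory_order_acquire);
  // Under this scheme, y is expected to be untainted.
  assert(dfsan_get_label(y) == 0);
  return 0;
}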