diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -207,6 +207,14 @@
         "to results."),
     cl::Hidden, cl::init(true));
 
+// TODO: This default value follows MSan. DFSan may use a different value.
+static cl::opt<int> ClInstrumentWithCallThreshold(
+    "dfsan-instrument-with-call-threshold",
+    cl::desc("If the function being instrumented requires more than "
+             "this number of origin stores, use callbacks instead of "
+             "inline checks (-1 means never use callbacks)."),
+    cl::Hidden, cl::init(3500));
+
 // Controls how to track origins.
 // * 0: do not track origins.
 // * 1: track origins at memory store operations.
@@ -426,6 +434,7 @@
   FunctionCallee DFSanMaybeStoreOriginFn;
   SmallPtrSet<Value *, 16> DFSanRuntimeFunctions;
   MDNode *ColdCallWeights;
+  MDNode *OriginStoreWeights;
   DFSanABIList ABIList;
   DenseMap<Value *, Function *> UnwrappedFnMap;
   AttrBuilder ReadOnlyNoneAttrs;
@@ -578,8 +587,9 @@
   std::pair<Value *, Value *> loadShadowOrigin(Value *ShadowAddr,
                                                uint64_t Size,
                                                Align InstAlignment,
                                                Instruction *Pos);
-  void storePrimitiveShadow(Value *Addr, uint64_t Size, Align Alignment,
-                            Value *PrimitiveShadow, Instruction *Pos);
+  void storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
+                                  Align InstAlignment, Value *PrimitiveShadow,
+                                  Value *Origin, Instruction *Pos);
   /// Applies PrimitiveShadow to all primitive subtypes of T, returning
   /// the expanded shadow value.
   ///
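As a minimal standalone sketch (plain C++, not the pass itself) of how the new threshold flag behaves; the constant and the running counter stand in for `ClInstrumentWithCallThreshold` and `NumOriginStores`:

```cpp
#include <cstdio>

// Model of the -dfsan-instrument-with-call-threshold semantics: once a
// function has accumulated Threshold inline origin-store checks, further
// origin stores fall back to a runtime callback; -1 disables the fallback.
static bool shouldInstrumentWithCall(int NumOriginStores, int Threshold) {
  return Threshold >= 0 && NumOriginStores >= Threshold;
}

int main() {
  const int Threshold = 3500; // default in this patch, copied from MSan
  std::printf("3499 stores -> %s\n",
              shouldInstrumentWithCall(3499, Threshold) ? "callback" : "inline");
  std::printf("3500 stores -> %s\n",
              shouldInstrumentWithCall(3500, Threshold) ? "callback" : "inline");
  std::printf("threshold -1 -> %s\n",
              shouldInstrumentWithCall(10000, -1) ? "callback" : "inline");
  return 0;
}
```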
@@ -633,6 +643,33 @@
   /// checks if it is possible to load labels and origins without using the
   /// callback function.
   bool useCallbackLoadLabelAndOrigin(uint64_t Size, Align InstAlignment);
+
+  /// Returns a chain at the current stack with previous origin V.
+  Value *updateOrigin(Value *V, IRBuilder<> &IRB);
+
+  /// Creates an Intptr = Origin | Origin << 32 if Intptr's size is 64. Returns
+  /// Origin otherwise.
+  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin);
+
+  /// Stores Origin into the address range [StoreOriginAddr, StoreOriginAddr +
+  /// Size).
+  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *StoreOriginAddr,
+                   uint64_t StoreOriginSize, Align Alignment);
+
+  /// Stores Origin in terms of its Shadow value.
+  /// * Do not write origins for zero shadows because we do not trace origins
+  ///   for untainted sinks.
+  /// * Use __dfsan_maybe_store_origin if there are too many origin store
+  ///   instrumentations.
+  void storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size, Value *Shadow,
+                   Value *Origin, Value *StoreOriginAddr, Align InstAlignment);
+
+  /// Convert a scalar value to an i1 by comparing with 0.
+  Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &Name = "");
+
+  bool shouldInstrumentWithCall();
+
+  int NumOriginStores = 0;
 };
 
 class DFSanVisitor : public InstVisitor<DFSanVisitor> {
@@ -837,6 +874,11 @@
   llvm_unreachable("Unexpected shadow type");
 }
 
+bool DFSanFunction::shouldInstrumentWithCall() {
+  return ClInstrumentWithCallThreshold >= 0 &&
+         NumOriginStores >= ClInstrumentWithCallThreshold;
+}
+
 Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
                                                 Instruction *Pos) {
   Type *ShadowTy = DFS.getShadowTy(T);
@@ -1009,6 +1051,7 @@
                         /*isVarArg=*/false);
 
   ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
+  OriginStoreWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
   return true;
 }
 
@@ -2212,6 +2255,99 @@
   }
 }
 
+Value *DFSanFunction::updateOrigin(Value *V, IRBuilder<> &IRB) {
+  if (!DFS.shouldTrackOrigins())
+    return V;
+  return IRB.CreateCall(DFS.DFSanChainOriginFn, V);
+}
+
+Value *DFSanFunction::originToIntptr(IRBuilder<> &IRB, Value *Origin) {
+  const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
+  const DataLayout &DL = F->getParent()->getDataLayout();
+  unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
+  if (IntptrSize == OriginSize)
+    return Origin;
+  assert(IntptrSize == OriginSize * 2);
+  Origin = IRB.CreateIntCast(Origin, DFS.IntptrTy, /* isSigned */ false);
+  return IRB.CreateOr(Origin, IRB.CreateShl(Origin, OriginSize * 8));
+}
+
+void DFSanFunction::paintOrigin(IRBuilder<> &IRB, Value *Origin,
+                                Value *StoreOriginAddr,
+                                uint64_t StoreOriginSize, Align Alignment) {
+  const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
+  const DataLayout &DL = F->getParent()->getDataLayout();
+  const Align IntptrAlignment = DL.getABITypeAlign(DFS.IntptrTy);
+  unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
+  assert(IntptrAlignment >= MinOriginAlignment);
+  assert(IntptrSize >= OriginSize);
+
+  unsigned Ofs = 0;
+  Align CurrentAlignment = Alignment;
+  if (Alignment >= IntptrAlignment && IntptrSize > OriginSize) {
+    Value *IntptrOrigin = originToIntptr(IRB, Origin);
+    Value *IntptrStoreOriginPtr = IRB.CreatePointerCast(
+        StoreOriginAddr, PointerType::get(DFS.IntptrTy, 0));
+    for (unsigned I = 0; I < StoreOriginSize / IntptrSize; ++I) {
+      Value *Ptr =
+          I ? IRB.CreateConstGEP1_32(DFS.IntptrTy, IntptrStoreOriginPtr, I)
+            : IntptrStoreOriginPtr;
+      IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
+      Ofs += IntptrSize / OriginSize;
+      CurrentAlignment = IntptrAlignment;
+    }
+  }
+
+  for (unsigned I = Ofs; I < (StoreOriginSize + OriginSize - 1) / OriginSize;
+       ++I) {
+    Value *GEP = I ? IRB.CreateConstGEP1_32(DFS.OriginTy, StoreOriginAddr, I)
+                   : StoreOriginAddr;
+    IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
+    CurrentAlignment = MinOriginAlignment;
+  }
+}
+
+Value *DFSanFunction::convertToBool(Value *V, IRBuilder<> &IRB,
+                                    const Twine &Name) {
+  Type *VTy = V->getType();
+  assert(VTy->isIntegerTy());
+  if (VTy->getIntegerBitWidth() == 1)
+    // Just converting a bool to a bool, so do nothing.
+    return V;
+  return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), Name);
+}
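The `Origin | Origin << 32` trick in `originToIntptr` is easiest to see outside of IR. A standalone sketch in plain C++; the function name mirrors the pass, everything else is illustrative:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// On a 64-bit target the 32-bit origin is replicated into both halves of an
// intptr-sized word, so a single i64 store paints two adjacent 4-byte
// origin slots at once.
static uint64_t originToIntptr(uint32_t Origin) {
  return static_cast<uint64_t>(Origin) | (static_cast<uint64_t>(Origin) << 32);
}

int main() {
  const uint32_t Origin = 0xdeadbeef;
  const uint64_t Packed = originToIntptr(Origin);
  uint32_t Slots[2];
  std::memcpy(Slots, &Packed, sizeof(Slots));      // one wide store...
  assert(Slots[0] == Origin && Slots[1] == Origin); // ...fills both slots
  return 0;
}
```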
+
+void DFSanFunction::storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size,
+                                Value *Shadow, Value *Origin,
+                                Value *StoreOriginAddr, Align InstAlignment) {
+  // Do not write origins for zero shadows because we do not trace origins for
+  // untainted sinks.
+  const Align OriginAlignment = getOriginAlign(InstAlignment);
+  Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos);
+  IRBuilder<> IRB(Pos);
+  if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) {
+    if (!ConstantShadow->isZeroValue())
+      paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size,
+                  OriginAlignment);
+    return;
+  }
+
+  if (shouldInstrumentWithCall()) {
+    IRB.CreateCall(DFS.DFSanMaybeStoreOriginFn,
+                   {CollapsedShadow,
+                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+                    ConstantInt::get(DFS.IntptrTy, Size), Origin});
+  } else {
+    Value *Cmp = convertToBool(CollapsedShadow, IRB, "_dfscmp");
+    Instruction *CheckTerm = SplitBlockAndInsertIfThen(
+        Cmp, &*IRB.GetInsertPoint(), false, DFS.OriginStoreWeights, &DT);
+    IRBuilder<> IRBNew(CheckTerm);
+    paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), StoreOriginAddr, Size,
+                OriginAlignment);
+    ++NumOriginStores;
+  }
+}
+
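To make the splitting logic in `paintOrigin` concrete, here is a standalone C++ model that prints the store sequence for a given origin-range size and alignment. The constants assume a 64-bit target with 4-byte origins; it is illustrative only:

```cpp
#include <cstdio>

int main() {
  const unsigned OriginSize = 4;       // DataFlowSanitizer::OriginWidthBytes
  const unsigned IntptrSize = 8;       // 64-bit target
  const unsigned IntptrAlign = 8;
  const unsigned MinOriginAlign = 4;
  const unsigned StoreOriginSize = 12; // e.g. an i96 store
  const unsigned Alignment = 8;

  unsigned Ofs = 0;
  unsigned CurAlign = Alignment;
  // Wide i64 stores of the replicated origin while both size and alignment
  // allow it.
  if (Alignment >= IntptrAlign && IntptrSize > OriginSize) {
    for (unsigned I = 0; I < StoreOriginSize / IntptrSize; ++I) {
      std::printf("store i64 at origin slot %u, align %u\n", Ofs, CurAlign);
      Ofs += IntptrSize / OriginSize;
      CurAlign = IntptrAlign;
    }
  }
  // i32 stores for the remainder, rounding the range up to whole slots.
  for (unsigned I = Ofs; I < (StoreOriginSize + OriginSize - 1) / OriginSize;
       ++I) {
    std::printf("store i32 at origin slot %u, align %u\n", I, CurAlign);
    CurAlign = MinOriginAlign;
  }
  return 0;
}
```

For the 12-byte example this prints one i64 store followed by one i32 store, which is the shape checked by the `@store96_align8` test further below.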
 void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
                                              Align ShadowAlign,
                                              Instruction *Pos) {
@@ -2226,30 +2362,46 @@
   // Do not write origins for 0 shadows because we do not trace origins for
   // untainted sinks.
 }
-void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
-                                         Align Alignment,
-                                         Value *PrimitiveShadow,
-                                         Instruction *Pos) {
+
+void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
+                                               Align InstAlignment,
+                                               Value *PrimitiveShadow,
+                                               Value *Origin,
+                                               Instruction *Pos) {
+  const bool ShouldTrackOrigins = DFS.shouldTrackOrigins() && Origin;
+
   if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
-    const auto I = AllocaShadowMap.find(AI);
-    if (I != AllocaShadowMap.end()) {
+    const auto SI = AllocaShadowMap.find(AI);
+    if (SI != AllocaShadowMap.end()) {
       IRBuilder<> IRB(Pos);
-      IRB.CreateStore(PrimitiveShadow, I->second);
+      IRB.CreateStore(PrimitiveShadow, SI->second);
+
+      // Do not write origins for 0 shadows because we do not trace origins for
+      // untainted sinks.
+      if (ShouldTrackOrigins && !DFS.isZeroShadow(PrimitiveShadow)) {
+        const auto OI = AllocaOriginMap.find(AI);
+        assert(OI != AllocaOriginMap.end() && Origin);
+        IRB.CreateStore(Origin, OI->second);
+      }
       return;
     }
   }
 
-  const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
+  const Align ShadowAlign = getShadowAlign(InstAlignment);
   if (DFS.isZeroShadow(PrimitiveShadow)) {
     storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos);
     return;
   }
 
   IRBuilder<> IRB(Pos);
-  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
+  Value *ShadowAddr, *OriginAddr;
+  std::tie(ShadowAddr, OriginAddr) =
+      DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
+
   const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
   uint64_t Offset = 0;
-  if (Size >= ShadowVecSize) {
+  uint64_t LeftSize = Size;
+  if (LeftSize >= ShadowVecSize) {
     auto *ShadowVecTy =
         FixedVectorType::get(DFS.PrimitiveShadowTy, ShadowVecSize);
     Value *ShadowVec = UndefValue::get(ShadowVecTy);
@@ -2264,18 +2416,23 @@
       Value *CurShadowVecAddr =
           IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
       IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
-      Size -= ShadowVecSize;
+      LeftSize -= ShadowVecSize;
       ++Offset;
-    } while (Size >= ShadowVecSize);
+    } while (LeftSize >= ShadowVecSize);
     Offset *= ShadowVecSize;
   }
-  while (Size > 0) {
+  while (LeftSize > 0) {
     Value *CurShadowAddr =
         IRB.CreateConstGEP1_32(DFS.PrimitiveShadowTy, ShadowAddr, Offset);
     IRB.CreateAlignedStore(PrimitiveShadow, CurShadowAddr, ShadowAlign);
-    --Size;
+    --LeftSize;
     ++Offset;
   }
+
+  if (ShouldTrackOrigins) {
+    storeOrigin(Pos, Addr, Size, PrimitiveShadow, Origin, OriginAddr,
+                InstAlignment);
+  }
 }
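The shadow-store layout above (128-bit vector stores plus a scalar tail) can also be modeled standalone. A sketch in plain C++, assuming 16-bit fast labels:

```cpp
#include <cstdio>

int main() {
  const unsigned ShadowWidthBits = 16;                  // fast16 labels
  const unsigned ShadowVecSize = 128 / ShadowWidthBits; // 8 labels per vector
  unsigned LeftSize = 20;                               // labels still to store
  unsigned Offset = 0;                                  // counts vectors first

  if (LeftSize >= ShadowVecSize) {
    do {
      std::printf("store <%u x i%u> at label offset %u\n", ShadowVecSize,
                  ShadowWidthBits, Offset * ShadowVecSize);
      LeftSize -= ShadowVecSize;
      ++Offset;
    } while (LeftSize >= ShadowVecSize);
    Offset *= ShadowVecSize; // switch to counting scalar labels
  }
  while (LeftSize > 0) {
    std::printf("store i%u at label offset %u\n", ShadowWidthBits, Offset);
    --LeftSize;
    ++Offset;
  }
  return 0;
}
```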
 
 static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {
@@ -2310,19 +2467,36 @@
   if (SI.isAtomic())
     SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
 
-  const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);
+  const bool ShouldTrackOrigins =
+      DFSF.DFS.shouldTrackOrigins() && !SI.isAtomic();
+  std::vector<Value *> Shadows;
+  std::vector<Value *> Origins;
   Value *Shadow =
       SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val);
+
+  if (ShouldTrackOrigins) {
+    Shadows.push_back(Shadow);
+    Origins.push_back(DFSF.getOrigin(Val));
+  }
+
   Value *PrimitiveShadow;
   if (ClCombinePointerLabelsOnStore) {
     Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
+    if (ShouldTrackOrigins) {
+      Shadows.push_back(PtrShadow);
+      Origins.push_back(DFSF.getOrigin(SI.getPointerOperand()));
+    }
     PrimitiveShadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
   } else {
     PrimitiveShadow = DFSF.collapseToPrimitiveShadow(Shadow, &SI);
   }
-  DFSF.storePrimitiveShadow(SI.getPointerOperand(), Size, Alignment,
-                            PrimitiveShadow, &SI);
+  Value *Origin = nullptr;
+  if (ShouldTrackOrigins) {
+    Origin = DFSF.combineOrigins(Shadows, Origins, &SI);
+  }
+  DFSF.storePrimitiveShadowOrigin(SI.getPointerOperand(), Size, SI.getAlign(),
+                                  PrimitiveShadow, Origin, &SI);
   if (ClEventCallbacks) {
     IRBuilder<> IRB(&SI);
     Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_ldst.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_ldst.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_ldst.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_ldst.ll
@@ -1,5 +1,6 @@
 ; RUN: opt < %s -dfsan -dfsan-track-origins=1 -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefixes=CHECK_META,CHECK
 ; RUN: opt < %s -dfsan -dfsan-track-origins=1 -dfsan-fast-16-labels=true -dfsan-combine-pointer-labels-on-load=false -S | FileCheck %s --check-prefixes=CHECK_META,NO_COMBINE_LOAD_PTR
+; RUN: opt < %s -dfsan -dfsan-track-origins=1 -dfsan-fast-16-labels=true -dfsan-combine-pointer-labels-on-store=true -S | FileCheck %s --check-prefixes=CHECK_META,COMBINE_STORE_PTR
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -249,3 +250,173 @@
   %a = load i17, i17* %p, align 4
   ret i17 %a
 }
+
+define void @store_zero_to_non_escaped_alloca() {
+  ; CHECK: @"dfs$store_zero_to_non_escaped_alloca"
+  ; CHECK-NEXT: [[A:%.*]] = alloca i[[#SBITS]], align [[#SBYTES]]
+  ; CHECK-NEXT: %_dfsa = alloca i32, align 4
+  ; CHECK-NEXT: %p = alloca i[[#SBITS]], align [[#SBYTES]]
+  ; CHECK-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* [[A]], align [[#SBYTES]]
+  ; CHECK-NEXT: store i16 1, i16* %p, align 2
+  ; CHECK-NEXT: ret void
+
+  %p = alloca i16
+  store i16 1, i16* %p
+  ret void
+}
+
+define void @store_nonzero_to_non_escaped_alloca(i16 %a) {
+  ; CHECK: @"dfs$store_nonzero_to_non_escaped_alloca"
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: %_dfsa = alloca i32, align 4
+  ; CHECK: store i32 [[AO]], i32* %_dfsa, align 4
+
+  %p = alloca i16
+  store i16 %a, i16* %p
+  ret void
+}
+
+declare void @foo(i16* %p)
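The escaped-alloca tests that follow check the origin-address arithmetic directly. A standalone C++ model of that computation; the constants are the ones in the CHECK lines (-123145302310913 is ~0x700000000000 and 35184372088832 is 0x200000000000), while the sample address is arbitrary:

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t App = 0x7ffd12345678ULL; // arbitrary application address
  // and i64 %intp, -123145302310913 : mask into the shadow offset
  const uint64_t Offset = App & ~0x700000000000ULL;
  // add i64 ..., 35184372088832 : rebase into the origin region,
  // and i64 ..., -4              : align down to a 4-byte origin slot
  const uint64_t OriginAddr = (Offset + 0x200000000000ULL) & ~3ULL;
  std::printf("app 0x%" PRIx64 " -> origin 0x%" PRIx64 "\n", App, OriginAddr);
  return 0;
}
```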
+
+define void @store_zero_to_escaped_alloca() {
+  ; CHECK: @"dfs$store_zero_to_escaped_alloca"
+  ; CHECK: [[SA:%.*]] = bitcast i[[#SBITS]]* {{.*}} to i32*
+  ; CHECK-NEXT: store i32 0, i32* [[SA]], align 4
+  ; CHECK-NEXT: store i[[#SBITS]] 1, i[[#SBITS]]* %p, align [[#SBYTES]]
+  ; CHECK-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[#SBYTES]]
+
+  ; COMBINE_STORE_PTR: @"dfs$store_zero_to_escaped_alloca"
+  ; COMBINE_STORE_PTR: [[SA:%.*]] = bitcast i[[#SBITS]]* {{.*}} to i32*
+  ; COMBINE_STORE_PTR-NEXT: store i32 0, i32* [[SA]], align 4
+  ; COMBINE_STORE_PTR-NEXT: store i16 1, i16* %p, align 2
+  ; COMBINE_STORE_PTR-NEXT: call void @foo(i16* %p)
+  ; COMBINE_STORE_PTR-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[#SBYTES]]
+
+  %p = alloca i16
+  store i16 1, i16* %p
+  call void @foo(i16* %p)
+  ret void
+}
+
+define void @store_nonzero_to_escaped_alloca(i16 %a) {
+  ; CHECK: @"dfs$store_nonzero_to_escaped_alloca"
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[#SBYTES]]
+  ; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64
+  ; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+  ; CHECK: [[ORIGIN_OFFSET:%.*]] = add i64 [[OFFSET]], 35184372088832
+  ; CHECK: [[ORIGIN_ADDR:%.*]] = and i64 [[ORIGIN_OFFSET]], -4
+  ; CHECK: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32*
+  ; CHECK: %_dfscmp = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
+  ; CHECK: [[L1]]:
+  ; CHECK: [[NO:%.*]] = call i32 @__dfsan_chain_origin(i32 [[AO]])
+  ; CHECK: store i32 [[NO]], i32* [[ORIGIN_PTR]], align 4
+  ; CHECK: br label %[[L2]]
+  ; CHECK: [[L2]]:
+  ; CHECK: store i16 %a, i16* %p, align 2
+
+  ; COMBINE_STORE_PTR: @"dfs$store_nonzero_to_escaped_alloca"
+  ; COMBINE_STORE_PTR: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; COMBINE_STORE_PTR: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[#SBYTES]]
+  ; COMBINE_STORE_PTR: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64
+  ; COMBINE_STORE_PTR: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+  ; COMBINE_STORE_PTR: [[ORIGIN_OFFSET:%.*]] = add i64 [[OFFSET]], 35184372088832
+  ; COMBINE_STORE_PTR: [[ORIGIN_ADDR:%.*]] = and i64 [[ORIGIN_OFFSET]], -4
+  ; COMBINE_STORE_PTR: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32*
+  ; COMBINE_STORE_PTR: %_dfscmp = icmp ne i[[#SBITS]] [[AS]], 0
+  ; COMBINE_STORE_PTR: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
+  ; COMBINE_STORE_PTR: [[L1]]:
+  ; COMBINE_STORE_PTR: [[NO:%.*]] = call i32 @__dfsan_chain_origin(i32 [[AO]])
+  ; COMBINE_STORE_PTR: store i32 [[NO]], i32* [[ORIGIN_PTR]], align 4
+  ; COMBINE_STORE_PTR: br label %[[L2]]
+  ; COMBINE_STORE_PTR: [[L2]]:
+  ; COMBINE_STORE_PTR: store i16 %a, i16* %p, align 2
+
+  %p = alloca i16
+  store i16 %a, i16* %p
+  call void @foo(i16* %p)
+  ret void
+}
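Operationally, the `[[L1]]`/`[[L2]]` blocks above implement a cheap guard: chain and write the origin only when the (combined) shadow is nonzero. A standalone C++ rendering of that control flow, where `chain_origin` is a stub standing in for `__dfsan_chain_origin`:

```cpp
#include <cstdint>
#include <cstdio>

static uint32_t chain_origin(uint32_t Origin) {
  return Origin + 1; // stub: the runtime records a new origin-chain entry
}

// %_dfscmp = icmp ne i16 %shadow, 0; only the taken branch pays for the
// chain call and the origin store.
static void store_with_origin(uint16_t Shadow, uint32_t Origin,
                              uint32_t *OriginSlot) {
  if (Shadow != 0)
    *OriginSlot = chain_origin(Origin);
}

int main() {
  uint32_t Slot = 0;
  store_with_origin(/*Shadow=*/0, /*Origin=*/7, &Slot);
  std::printf("untainted store: slot = %u\n", Slot); // unchanged
  store_with_origin(/*Shadow=*/1, /*Origin=*/7, &Slot);
  std::printf("tainted store:   slot = %u\n", Slot); // origin painted
  return 0;
}
```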
+
+define void @store64_align8(i64* %p, i64 %a) {
+  ; CHECK: @"dfs$store64_align8"
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[#SBYTES]]
+  ; CHECK: %_dfscmp = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
+  ; CHECK: [[L1]]:
+  ; CHECK: [[NO:%.*]] = call i32 @__dfsan_chain_origin(i32 [[AO]])
+  ; CHECK: [[NO_ZEXT:%.*]] = zext i32 [[NO]] to i64
+  ; CHECK: [[NO_SHL:%.*]] = shl i64 [[NO_ZEXT]], 32
+  ; CHECK: [[NO2:%.*]] = or i64 [[NO_ZEXT]], [[NO_SHL]]
+  ; CHECK: [[O_PTR:%.*]] = bitcast i32* {{.*}} to i64*
+  ; CHECK: store i64 [[NO2]], i64* [[O_PTR]], align 8
+  ; CHECK: br label %[[L2]]
+  ; CHECK: [[L2]]:
+  ; CHECK: store i64 %a, i64* %p, align 8
+
+  ; COMBINE_STORE_PTR: @"dfs$store64_align8"
+  ; COMBINE_STORE_PTR: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; COMBINE_STORE_PTR: [[PS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[#SBYTES]]
+  ; COMBINE_STORE_PTR: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; COMBINE_STORE_PTR: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[#SBYTES]]
+  ; COMBINE_STORE_PTR: [[MS:%.*]] = or i[[#SBITS]] [[AS]], [[PS]]
+  ; COMBINE_STORE_PTR: [[NE:%.*]] = icmp ne i[[#SBITS]] [[PS]], 0
+  ; COMBINE_STORE_PTR: [[MO:%.*]] = select i1 [[NE]], i32 [[PO]], i32 [[AO]]
+  ; COMBINE_STORE_PTR: %_dfscmp = icmp ne i[[#SBITS]] [[MS]], 0
+  ; COMBINE_STORE_PTR: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
+  ; COMBINE_STORE_PTR: [[L1]]:
+  ; COMBINE_STORE_PTR: [[NO:%.*]] = call i32 @__dfsan_chain_origin(i32 [[MO]])
+  ; COMBINE_STORE_PTR: [[NO_ZEXT:%.*]] = zext i32 [[NO]] to i64
+  ; COMBINE_STORE_PTR: [[NO_SHL:%.*]] = shl i64 [[NO_ZEXT]], 32
+  ; COMBINE_STORE_PTR: [[NO2:%.*]] = or i64 [[NO_ZEXT]], [[NO_SHL]]
+  ; COMBINE_STORE_PTR: [[O_PTR:%.*]] = bitcast i32* {{.*}} to i64*
+  ; COMBINE_STORE_PTR: store i64 [[NO2]], i64* [[O_PTR]], align 8
+  ; COMBINE_STORE_PTR: br label %[[L2]]
+  ; COMBINE_STORE_PTR: [[L2]]:
+  ; COMBINE_STORE_PTR: store i64 %a, i64* %p, align 8
+
+  store i64 %a, i64* %p
+  ret void
+}
+
+define void @store64_align2(i64* %p, i64 %a) {
+  ; CHECK: @"dfs$store64_align2"
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[#SBYTES]]
+  ; CHECK: %_dfscmp = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
+  ; CHECK: [[L1]]:
+  ; CHECK: [[NO:%.*]] = call i32 @__dfsan_chain_origin(i32 [[AO]])
+  ; CHECK: store i32 [[NO]], i32* [[O_PTR0:%.*]], align 4
+  ; CHECK: [[O_PTR1:%.*]] = getelementptr i32, i32* [[O_PTR0]], i32 1
+  ; CHECK: store i32 [[NO]], i32* [[O_PTR1]], align 4
+  ; CHECK: [[L2]]:
+  ; CHECK: store i64 %a, i64* %p, align 2
+
+  store i64 %a, i64* %p, align 2
+  ret void
+}
+
+define void @store96_align8(i96* %p, i96 %a) {
+  ; CHECK: @"dfs$store96_align8"
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[#SBYTES]]
+  ; CHECK: %_dfscmp = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
+  ; CHECK: [[L1]]:
+  ; CHECK: [[NO:%.*]] = call i32 @__dfsan_chain_origin(i32 [[AO]])
+  ; CHECK: [[NO_ZEXT:%.*]] = zext i32 [[NO]] to i64
+  ; CHECK: [[NO_SHL:%.*]] = shl i64 [[NO_ZEXT]], 32
+  ; CHECK: [[NO2:%.*]] = or i64 [[NO_ZEXT]], [[NO_SHL]]
+  ; CHECK: [[O_PTR64:%.*]] = bitcast i32* [[O_PTR0:%.*]] to i64*
+  ; CHECK: store i64 [[NO2]], i64* [[O_PTR64]], align 8
+  ; CHECK: [[O_PTR1:%.*]] = getelementptr i32, i32* [[O_PTR0]], i32 2
+  ; CHECK: store i32 [[NO]], i32* [[O_PTR1]], align 8
+  ; CHECK: [[L2]]:
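The TLS offsets these checks rely on follow from the argument layout: one i32 slot per argument in `__dfsan_arg_origin_tls`, and densely packed 2-byte labels in `__dfsan_arg_tls`. A small C++ model for `@store64_align8(i64* %p, i64 %a)`; it is illustrative and simply reproduces the offsets in the checks:

```cpp
#include <cstdio>

int main() {
  const unsigned ShadowWidthBytes = 2; // fast16 labels
  const char *Args[] = {"%p", "%a"};
  unsigned ShadowByteOffset = 0;
  for (unsigned I = 0; I < 2; ++I) {
    // Origin TLS: index I into [200 x i32]; shadow TLS: running byte offset.
    std::printf("arg %s: origin slot %u, shadow byte offset %u\n", Args[I], I,
                ShadowByteOffset);
    ShadowByteOffset += ShadowWidthBytes; // both args carry one primitive label
  }
  return 0;
}
```

This is why `%a`'s origin loads from index 1 and its shadow from byte offset 2 of `@__dfsan_arg_tls` in the CHECK lines above.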
+  ; CHECK: store i96 %a, i96* %p, align 8
+
+  store i96 %a, i96* %p, align 8
+  ret void
+}
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -dfsan -dfsan-track-origins=1 -dfsan-fast-16-labels=true -dfsan-instrument-with-call-threshold=0 -S | FileCheck %s --check-prefix=CHECK
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
+; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
+
+define void @store_threshold([2 x i64]* %p, [2 x i64] %a) {
+  ; CHECK: @"dfs$store_threshold"
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AS:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to [2 x i[[#SBITS]]]*), align 2
+  ; CHECK: [[AS0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[AS]], 0
+  ; CHECK: [[AS1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[AS]], 1
+  ; CHECK: [[AS01:%.*]] = or i[[#SBITS]] [[AS0]], [[AS1]]
+  ; CHECK: [[ADDR:%.*]] = bitcast [2 x i64]* %p to i8*
+  ; CHECK: call void @__dfsan_maybe_store_origin(i[[#SBITS]] [[AS01]], i8* [[ADDR]], i64 16, i32 [[AO]])
+  ; CHECK: store [2 x i64] %a, [2 x i64]* %p, align 8
+
+  store [2 x i64] %a, [2 x i64]* %p
+  ret void
+}
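With `-dfsan-instrument-with-call-threshold=0`, every origin store above becomes a `__dfsan_maybe_store_origin` call, so the shadow check moves into the runtime. A hedged sketch of what such a hook has to do; the names and the toy origin memory are assumptions for illustration, not the actual compiler-rt implementation:

```cpp
#include <cstdint>
#include <cstdio>

static uint32_t chain_origin(uint32_t Origin) { return Origin; /* stub */ }

static uint32_t ToyOriginShadow[4]; // stand-in for real origin shadow memory

extern "C" void maybe_store_origin(uint16_t Shadow, void *Addr, uint64_t Size,
                                   uint32_t Origin) {
  if (Shadow == 0)
    return; // untainted: origins are not traced
  (void)Addr; // a real runtime would map Addr to its origin shadow here
  const uint32_t Chained = chain_origin(Origin);
  for (uint64_t I = 0; I < (Size + 3) / 4; ++I)
    ToyOriginShadow[I] = Chained; // one 32-bit origin per 4 application bytes
}

int main() {
  uint64_t Buf[2]; // mirrors the [2 x i64] store in the test (Size == 16)
  maybe_store_origin(/*Shadow=*/1, Buf, sizeof(Buf), /*Origin=*/42);
  std::printf("first origin slot: %u\n", ToyOriginShadow[0]);
  return 0;
}
```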