diff --git a/compiler-rt/test/dfsan/basic.c b/compiler-rt/test/dfsan/basic.c
--- a/compiler-rt/test/dfsan/basic.c
+++ b/compiler-rt/test/dfsan/basic.c
@@ -1,5 +1,4 @@
 // RUN: %clang_dfsan %s -o %t && %run %t
-// RUN: %clang_dfsan -mllvm -dfsan-args-abi %s -o %t && %run %t
 //
 // REQUIRES: x86_64-target-arch
diff --git a/compiler-rt/test/dfsan/custom.cpp b/compiler-rt/test/dfsan/custom.cpp
--- a/compiler-rt/test/dfsan/custom.cpp
+++ b/compiler-rt/test/dfsan/custom.cpp
@@ -1,8 +1,6 @@
 // RUN: %clang_dfsan %s -o %t && DFSAN_OPTIONS="strict_data_dependencies=0" %run %t
-// RUN: %clang_dfsan -mllvm -dfsan-args-abi %s -o %t && DFSAN_OPTIONS="strict_data_dependencies=0" %run %t
 // RUN: %clang_dfsan %s -o %t && DFSAN_OPTIONS="strict_data_dependencies=0" %run %t
 // RUN: %clang_dfsan -DSTRICT_DATA_DEPENDENCIES %s -o %t && %run %t
-// RUN: %clang_dfsan -DSTRICT_DATA_DEPENDENCIES -mllvm -dfsan-args-abi %s -o %t && %run %t
 // RUN: %clang_dfsan -DORIGIN_TRACKING -mllvm -dfsan-track-origins=1 -mllvm -dfsan-combine-pointer-labels-on-load=false -DSTRICT_DATA_DEPENDENCIES %s -o %t && %run %t
 // RUN: %clang_dfsan -DORIGIN_TRACKING -mllvm -dfsan-track-origins=1 -mllvm -dfsan-combine-pointer-labels-on-load=false %s -o %t && DFSAN_OPTIONS="strict_data_dependencies=0" %run %t
 //
diff --git a/compiler-rt/test/dfsan/fncall.c b/compiler-rt/test/dfsan/fncall.c
--- a/compiler-rt/test/dfsan/fncall.c
+++ b/compiler-rt/test/dfsan/fncall.c
@@ -1,5 +1,4 @@
 // RUN: %clang_dfsan %s -o %t && %run %t
-// RUN: %clang_dfsan -mllvm -dfsan-args-abi %s -o %t && %run %t
 //
 // REQUIRES: x86_64-target-arch
 //
diff --git a/compiler-rt/test/dfsan/propagate.c b/compiler-rt/test/dfsan/propagate.c
--- a/compiler-rt/test/dfsan/propagate.c
+++ b/compiler-rt/test/dfsan/propagate.c
@@ -1,5 +1,4 @@
 // RUN: %clang_dfsan %s -o %t && %run %t
-// RUN: %clang_dfsan -mllvm -dfsan-args-abi %s -o %t && %run %t
 //
 // REQUIRES: x86_64-target-arch
diff --git a/compiler-rt/test/dfsan/vararg.c b/compiler-rt/test/dfsan/vararg.c
--- a/compiler-rt/test/dfsan/vararg.c
+++ b/compiler-rt/test/dfsan/vararg.c
@@ -1,9 +1,6 @@
 // RUN: %clang_dfsan %s -o %t
 // RUN: not %run %t 2>&1 | FileCheck %s
 // RUN: %run %t foo
-// RUN: %clang_dfsan -mllvm -dfsan-args-abi %s -o %t
-// RUN: not %run %t 2>&1 | FileCheck %s
-// RUN: %run %t foo
 //
 // REQUIRES: x86_64-target-arch
diff --git a/compiler-rt/test/dfsan/write_callback.c b/compiler-rt/test/dfsan/write_callback.c
--- a/compiler-rt/test/dfsan/write_callback.c
+++ b/compiler-rt/test/dfsan/write_callback.c
@@ -1,5 +1,4 @@
-// RUN: %clang_dfsan %s -o %t && %run %t | FileCheck %s
-// RUN: %clang_dfsan -mllvm -dfsan-args-abi %s -o %t && %run %t | FileCheck %s
+// RUN: %clang_dfsan %s -o %t && %run %t | FileCheck %s
 //
 // REQUIRES: x86_64-target-arch
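Not part of the patch: a minimal sketch of the behavior the compiler-rt tests above exercise under the TLS ABI, namely that a label attached to an argument propagates through an instrumented call and is visible on the return value. It assumes the usual <sanitizer/dfsan_interface.h> entry points; the file name, label value, and helper function are illustrative.

// label_through_call.c (illustrative only)
// Build and run, hypothetically: clang -fsanitize=dataflow label_through_call.c && ./a.out
#include <assert.h>
#include <sanitizer/dfsan_interface.h>

int add_one(int x) { return x + 1; } // instrumented by the pass as add_one.dfsan

int main(void) {
  int i = 41;
  dfsan_set_label(/*label=*/1, &i, sizeof(i));    // taint the input bytes
  int r = add_one(i);                             // label flows via __dfsan_arg_tls / __dfsan_retval_tls
  assert(dfsan_has_label(dfsan_get_label(r), 1)); // the return value carries the label
  return 0;
}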
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -18,6 +18,9 @@
 /// The analysis is based on automatic propagation of data flow labels (also
 /// known as taint labels) through a program as it performs computation.
 ///
+/// Argument and return value labels are passed through TLS variables
+/// __dfsan_arg_tls and __dfsan_retval_tls.
+///
 /// Each byte of application memory is backed by a shadow memory byte. The
 /// shadow byte can represent up to 8 labels. On Linux/x86_64, memory is then
 /// laid out as follows:
@@ -160,13 +163,6 @@
     cl::desc("File listing native ABI functions and how the pass treats them"),
     cl::Hidden);

-// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
-// functions (see DataFlowSanitizer::InstrumentedABI below).
-static cl::opt<bool>
-    ClArgsABI("dfsan-args-abi",
-              cl::desc("Use the argument ABI rather than the TLS ABI"),
-              cl::Hidden);
-
 // Controls whether the pass includes or ignores the labels of pointers in load
 // instructions.
 static cl::opt<bool> ClCombinePointerLabelsOnLoad(
@@ -381,17 +377,6 @@
   enum { OriginWidthBits = 32, OriginWidthBytes = OriginWidthBits / 8 };

-  /// Which ABI should be used for instrumented functions?
-  enum InstrumentedABI {
-    /// Argument and return value labels are passed through additional
-    /// arguments and by modifying the return type.
-    IA_Args,
-
-    /// Argument and return value labels are passed through TLS variables
-    /// __dfsan_arg_tls and __dfsan_retval_tls.
-    IA_TLS
-  };
-
   /// How should calls to uninstrumented functions be handled?
   enum WrapperKind {
     /// This function is present in an uninstrumented form but we don't know
@@ -409,9 +394,7 @@

     /// Instead of calling the function, a custom wrapper __dfsw_F is called,
     /// where F is the name of the function. This function may wrap the
-    /// original function or provide its own implementation. This is similar to
-    /// the IA_Args ABI, except that IA_Args uses a struct return type to
-    /// pass the return value shadow in a register, while WK_Custom uses an
+    /// original function or provide its own implementation. WK_Custom uses an
     /// extra pointer argument to return the shadow. This allows the wrapped
     /// form of the function type to be expressed in C.
     WK_Custom
@@ -482,7 +465,6 @@
   FunctionType *getArgsFunctionType(FunctionType *T);
   FunctionType *getTrampolineFunctionType(FunctionType *T);
   TransformedFunction getCustomFunctionType(FunctionType *T);
-  InstrumentedABI getInstrumentedABI();
   WrapperKind getWrapperKind(Function *F);
   void addGlobalNameSuffix(GlobalValue *GV);
   Function *buildWrapperFunction(Function *F, StringRef NewFName,
@@ -506,18 +488,11 @@
   /// Returns whether the pass tracks origins. Supports only TLS ABI mode.
   bool shouldTrackOrigins();

-  /// Returns whether the pass tracks labels for struct fields and array
-  /// indices. Supports only TLS ABI mode.
-  bool shouldTrackFieldsAndIndices();
-
   /// Returns a zero constant with the shadow type of OrigTy.
   ///
   /// getZeroShadow({T1,T2,...}) = {getZeroShadow(T1),getZeroShadow(T2,...}
   /// getZeroShadow([n x T]) = [n x getZeroShadow(T)]
   /// getZeroShadow(other type) = i16(0)
-  ///
-  /// Note that a zero shadow is always i16(0) when shouldTrackFieldsAndIndices
-  /// returns false.
   Constant *getZeroShadow(Type *OrigTy);
   /// Returns a zero constant with the shadow type of V's type.
   Constant *getZeroShadow(Value *V);
@@ -530,9 +505,6 @@
   /// getShadowTy({T1,T2,...}) = {getShadowTy(T1),getShadowTy(T2),...}
   /// getShadowTy([n x T]) = [n x getShadowTy(T)]
   /// getShadowTy(other type) = i16
-  ///
-  /// Note that a shadow type is always i16 when shouldTrackFieldsAndIndices
-  /// returns false.
   Type *getShadowTy(Type *OrigTy);
   /// Returns the shadow type of of V's type.
   Type *getShadowTy(Value *V);
@@ -549,7 +521,6 @@
   DataFlowSanitizer &DFS;
   Function *F;
   DominatorTree DT;
-  DataFlowSanitizer::InstrumentedABI IA;
   bool IsNativeABI;
   bool IsForceZeroLabels;
   AllocaInst *LabelReturnAlloca = nullptr;
@@ -584,7 +555,7 @@
   DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI,
                 bool IsForceZeroLabels)
-      : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI),
+      : DFS(DFS), F(F), IsNativeABI(IsNativeABI),
        IsForceZeroLabels(IsForceZeroLabels) {
     DT.recalculate(*F);
   }
@@ -874,9 +845,6 @@
 }

 bool DataFlowSanitizer::isZeroShadow(Value *V) {
-  if (!shouldTrackFieldsAndIndices())
-    return ZeroPrimitiveShadow == V;
-
   Type *T = V->getType();
   if (!isa<ArrayType>(T) && !isa<StructType>(T)) {
     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
@@ -893,19 +861,11 @@
 }

 bool DataFlowSanitizer::shouldTrackOrigins() {
-  static const bool ShouldTrackOrigins =
-      ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS;
+  static const bool ShouldTrackOrigins = ClTrackOrigins;
   return ShouldTrackOrigins;
 }

-bool DataFlowSanitizer::shouldTrackFieldsAndIndices() {
-  return getInstrumentedABI() == DataFlowSanitizer::IA_TLS;
-}
-
 Constant *DataFlowSanitizer::getZeroShadow(Type *OrigTy) {
-  if (!shouldTrackFieldsAndIndices())
-    return ZeroPrimitiveShadow;
-
   if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))
     return ZeroPrimitiveShadow;
   Type *ShadowTy = getShadowTy(OrigTy);
@@ -1005,8 +965,6 @@
   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
     return Shadow;

-  assert(DFS.shouldTrackFieldsAndIndices());
-
   // Checks if the cached collapsed shadow value dominates Pos.
   Value *&CS = CachedCollapsedShadows[Shadow];
   if (CS && DT.dominates(CS, Pos))
@@ -1020,9 +978,6 @@
 }

 Type *DataFlowSanitizer::getShadowTy(Type *OrigTy) {
-  if (!shouldTrackFieldsAndIndices())
-    return PrimitiveShadowTy;
-
   if (!OrigTy->isSized())
     return PrimitiveShadowTy;
   if (isa<IntegerType>(OrigTy))
@@ -1124,10 +1079,6 @@
   return ABIList.isIn(*F, "force_zero_labels");
 }

-DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
-  return ClArgsABI ? IA_Args : IA_TLS;
-}
-
 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
   if (ABIList.isIn(*F, "functional"))
     return WK_Functional;
@@ -1471,46 +1422,11 @@
       // Instrumented functions get a '.dfsan' suffix. This allows us to more
       // easily identify cases of mismatching ABIs. This naming scheme is
       // mangling-compatible (see Itanium ABI), using a vendor-specific suffix.
-      if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
-        FunctionType *NewFT = getArgsFunctionType(FT);
-        Function *NewF = Function::Create(NewFT, F.getLinkage(),
-                                          F.getAddressSpace(), "", &M);
-        NewF->copyAttributesFrom(&F);
-        NewF->removeRetAttrs(
-            AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
-        for (Function::arg_iterator FArg = F.arg_begin(),
-                                    NewFArg = NewF->arg_begin(),
-                                    FArgEnd = F.arg_end();
-             FArg != FArgEnd; ++FArg, ++NewFArg) {
-          FArg->replaceAllUsesWith(&*NewFArg);
-        }
-        NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
-
-        for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
-             UI != UE;) {
-          BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
-          ++UI;
-          if (BA) {
-            BA->replaceAllUsesWith(
-                BlockAddress::get(NewF, BA->getBasicBlock()));
-            delete BA;
-          }
-        }
-        F.replaceAllUsesWith(
-            ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
-        NewF->takeName(&F);
-        F.eraseFromParent();
-        *FI = NewF;
-        addGlobalNameSuffix(NewF);
-      } else {
-        addGlobalNameSuffix(&F);
-      }
+      addGlobalNameSuffix(&F);
     } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
       // Build a wrapper function for F. The wrapper simply calls F, and is
       // added to FnsToInstrument so that any instrumentation according to its
       // WrapperKind is done in the second pass below.
-      FunctionType *NewFT =
-          getInstrumentedABI() == IA_Args ? getArgsFunctionType(FT) : FT;

       // If the function being wrapped has local linkage, then preserve the
       // function's linkage in the wrapper function.
@@ -1522,9 +1438,8 @@
           &F,
           (shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) +
               std::string(F.getName()),
-          WrapperLinkage, NewFT);
-      if (getInstrumentedABI() == IA_TLS)
-        NewF->removeFnAttrs(ReadOnlyNoneAttrs);
+          WrapperLinkage, FT);
+      NewF->removeFnAttrs(ReadOnlyNoneAttrs);

       Value *WrappedFnCst =
           ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
@@ -1661,23 +1576,14 @@
   if (Argument *A = dyn_cast<Argument>(V)) {
     if (IsNativeABI)
       return DFS.ZeroOrigin;
-    switch (IA) {
-    case DataFlowSanitizer::IA_TLS: {
-      if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) {
-        Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin();
-        IRBuilder<> IRB(ArgOriginTLSPos);
-        Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB);
-        Origin = IRB.CreateLoad(DFS.OriginTy, ArgOriginPtr);
-      } else {
-        // Overflow
-        Origin = DFS.ZeroOrigin;
-      }
-      break;
-    }
-    case DataFlowSanitizer::IA_Args: {
+    if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) {
+      Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin();
+      IRBuilder<> IRB(ArgOriginTLSPos);
+      Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB);
+      Origin = IRB.CreateLoad(DFS.OriginTy, ArgOriginPtr);
+    } else {
+      // Overflow
       Origin = DFS.ZeroOrigin;
-      break;
-    }
     }
   } else {
     Origin = DFS.ZeroOrigin;
@@ -1735,20 +1641,7 @@
   if (Argument *A = dyn_cast<Argument>(V)) {
     if (IsNativeABI)
       return DFS.getZeroShadow(V);
-    switch (IA) {
-    case DataFlowSanitizer::IA_TLS: {
-      Shadow = getShadowForTLSArgument(A);
-      break;
-    }
-    case DataFlowSanitizer::IA_Args: {
-      unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
-      Function::arg_iterator Arg = F->arg_begin();
-      std::advance(Arg, ArgIdx);
-      Shadow = &*Arg;
-      assert(Shadow->getType() == DFS.PrimitiveShadowTy);
-      break;
-    }
-    }
+    Shadow = getShadowForTLSArgument(A);
     NonZeroChecks.push_back(Shadow);
   } else {
     Shadow = DFS.getZeroShadow(V);
@@ -1759,8 +1652,6 @@
 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
   assert(!ValShadowMap.count(I));
-  assert(DFS.shouldTrackFieldsAndIndices() ||
-         Shadow->getType() == DFS.PrimitiveShadowTy);
   ValShadowMap[I] = Shadow;
 }
@@ -2577,15 +2468,12 @@
 }

 void DFSanVisitor::visitBitCastInst(BitCastInst &BCI) {
-  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
-    // Special case: if this is the bitcast (there is exactly 1 allowed) between
-    // a musttail call and a ret, don't instrument. New instructions are not
-    // allowed after a musttail call.
-    if (auto *CI = dyn_cast<CallInst>(BCI.getOperand(0)))
-      if (CI->isMustTailCall())
-        return;
-  }
-  // TODO: handle musttail call returns for IA_Args.
+  // Special case: if this is the bitcast (there is exactly 1 allowed) between
+  // a musttail call and a ret, don't instrument. New instructions are not
+  // allowed after a musttail call.
+  if (auto *CI = dyn_cast<CallInst>(BCI.getOperand(0)))
+    if (CI->isMustTailCall())
+      return;
   visitInstOperands(BCI);
 }
@@ -2643,11 +2531,6 @@
 }

 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
-  if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
-    visitInstOperands(I);
-    return;
-  }
-
   IRBuilder<> IRB(&I);
   Value *Agg = I.getAggregateOperand();
   Value *AggShadow = DFSF.getShadow(Agg);
@@ -2657,11 +2540,6 @@
 }

 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
-  if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
-    visitInstOperands(I);
-    return;
-  }
-
   IRBuilder<> IRB(&I);
   Value *AggShadow = DFSF.getShadow(I.getAggregateOperand());
   Value *InsShadow = DFSF.getShadow(I.getInsertedValueOperand());
@@ -2812,41 +2690,22 @@
 void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
   if (!DFSF.IsNativeABI && RI.getReturnValue()) {
-    switch (DFSF.IA) {
-    case DataFlowSanitizer::IA_TLS: {
-      // Don't emit the instrumentation for musttail call returns.
-      if (isAMustTailRetVal(RI.getReturnValue()))
-        return;
-
-      Value *S = DFSF.getShadow(RI.getReturnValue());
-      IRBuilder<> IRB(&RI);
-      Type *RT = DFSF.F->getFunctionType()->getReturnType();
-      unsigned Size =
-          getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
-      if (Size <= RetvalTLSSize) {
-        // If the size overflows, stores nothing. At callsite, oversized return
-        // shadows are set to zero.
-        IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB),
-                               ShadowTLSAlignment);
-      }
-      if (DFSF.DFS.shouldTrackOrigins()) {
-        Value *O = DFSF.getOrigin(RI.getReturnValue());
-        IRB.CreateStore(O, DFSF.getRetvalOriginTLS());
-      }
-      break;
-    }
-    case DataFlowSanitizer::IA_Args: {
-      // TODO: handle musttail call returns for IA_Args.
-
-      IRBuilder<> IRB(&RI);
-      Type *RT = DFSF.F->getFunctionType()->getReturnType();
-      Value *InsVal =
-          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
-      Value *InsShadow =
-          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
-      RI.setOperand(0, InsShadow);
-      break;
+    // Don't emit the instrumentation for musttail call returns.
+    if (isAMustTailRetVal(RI.getReturnValue()))
+      return;
+
+    Value *S = DFSF.getShadow(RI.getReturnValue());
+    IRBuilder<> IRB(&RI);
+    Type *RT = DFSF.F->getFunctionType()->getReturnType();
+    unsigned Size = getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
+    if (Size <= RetvalTLSSize) {
+      // If the size overflows, stores nothing. At callsite, oversized return
+      // shadows are set to zero.
+      IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB), ShadowTLSAlignment);
     }
+    if (DFSF.DFS.shouldTrackOrigins()) {
+      Value *O = DFSF.getOrigin(RI.getReturnValue());
+      IRB.CreateStore(O, DFSF.getRetvalOriginTLS());
     }
   }
 }
@@ -3069,32 +2928,30 @@
   const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
   FunctionType *FT = CB.getFunctionType();

-  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
-    // Stores argument shadows.
-    unsigned ArgOffset = 0;
-    const DataLayout &DL = getDataLayout();
-    for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
-      if (ShouldTrackOrigins) {
-        // Ignore overflowed origins
-        Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
-        if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
-            !DFSF.DFS.isZeroShadow(ArgShadow))
-          IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
-                          DFSF.getArgOriginTLS(I, IRB));
-      }
+  const DataLayout &DL = getDataLayout();

-      unsigned Size =
-          DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
-      // Stop storing if arguments' size overflows. Inside a function, arguments
-      // after overflow have zero shadow values.
-      if (ArgOffset + Size > ArgTLSSize)
-        break;
-      IRB.CreateAlignedStore(
-          DFSF.getShadow(CB.getArgOperand(I)),
-          DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
-          ShadowTLSAlignment);
-      ArgOffset += alignTo(Size, ShadowTLSAlignment);
+  // Stores argument shadows.
+  unsigned ArgOffset = 0;
+  for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
+    if (ShouldTrackOrigins) {
+      // Ignore overflowed origins
+      Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
+      if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
+          !DFSF.DFS.isZeroShadow(ArgShadow))
+        IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
+                        DFSF.getArgOriginTLS(I, IRB));
     }
+
+    unsigned Size =
+        DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
+    // Stop storing if arguments' size overflows. Inside a function, arguments
+    // after overflow have zero shadow values.
+    if (ArgOffset + Size > ArgTLSSize)
+      break;
+    IRB.CreateAlignedStore(DFSF.getShadow(CB.getArgOperand(I)),
+                           DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
+                           ShadowTLSAlignment);
+    ArgOffset += alignTo(Size, ShadowTLSAlignment);
   }

   Instruction *Next = nullptr;
@@ -3112,98 +2969,31 @@
       Next = CB.getNextNode();
     }

-    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
-      // Don't emit the epilogue for musttail call returns.
-      if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
-        return;
-
-      // Loads the return value shadow.
-      IRBuilder<> NextIRB(Next);
-      const DataLayout &DL = getDataLayout();
-      unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
-      if (Size > RetvalTLSSize) {
-        // Set overflowed return shadow to be zero.
-        DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
-      } else {
-        LoadInst *LI = NextIRB.CreateAlignedLoad(
-            DFSF.DFS.getShadowTy(&CB), DFSF.getRetvalTLS(CB.getType(), NextIRB),
-            ShadowTLSAlignment, "_dfsret");
-        DFSF.SkipInsts.insert(LI);
-        DFSF.setShadow(&CB, LI);
-        DFSF.NonZeroChecks.push_back(LI);
-      }
-
-      if (ShouldTrackOrigins) {
-        LoadInst *LI = NextIRB.CreateLoad(
-            DFSF.DFS.OriginTy, DFSF.getRetvalOriginTLS(), "_dfsret_o");
-        DFSF.SkipInsts.insert(LI);
-        DFSF.setOrigin(&CB, LI);
-      }
-    }
-  }
-
-  // Do all instrumentation for IA_Args down here to defer tampering with the
-  // CFG in a way that SplitEdge may be able to detect.
-  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
-    // TODO: handle musttail call returns for IA_Args.
-
-    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
-    Value *Func =
-        IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
-
-    const unsigned NumParams = FT->getNumParams();
-
-    // Copy original arguments.
-    auto *ArgIt = CB.arg_begin(), *ArgEnd = CB.arg_end();
-    std::vector<Value *> Args(NumParams);
-    std::copy_n(ArgIt, NumParams, Args.begin());
-
-    // Add shadow arguments by transforming original arguments.
-    std::generate_n(std::back_inserter(Args), NumParams,
-                    [&]() { return DFSF.getShadow(*ArgIt++); });
-
-    if (FT->isVarArg()) {
-      unsigned VarArgSize = CB.arg_size() - NumParams;
-      ArrayType *VarArgArrayTy =
-          ArrayType::get(DFSF.DFS.PrimitiveShadowTy, VarArgSize);
-      AllocaInst *VarArgShadow =
-          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
-                         "", &DFSF.F->getEntryBlock().front());
-      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
-
-      // Copy remaining var args.
-      unsigned GepIndex = 0;
-      std::for_each(ArgIt, ArgEnd, [&](Value *Arg) {
-        IRB.CreateStore(
-            DFSF.getShadow(Arg),
-            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, GepIndex++));
-        Args.push_back(Arg);
-      });
-    }
+    // Don't emit the epilogue for musttail call returns.
+    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
+      return;

-    CallBase *NewCB;
-    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
-      NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
-                               II->getUnwindDest(), Args);
+    // Loads the return value shadow.
+    IRBuilder<> NextIRB(Next);
+    unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
+    if (Size > RetvalTLSSize) {
+      // Set overflowed return shadow to be zero.
+      DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
     } else {
-      NewCB = IRB.CreateCall(NewFT, Func, Args);
-    }
-    NewCB->setCallingConv(CB.getCallingConv());
-    NewCB->setAttributes(CB.getAttributes().removeRetAttributes(
-        *DFSF.DFS.Ctx, AttributeFuncs::typeIncompatible(NewCB->getType())));
-
-    if (Next) {
-      ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
-      DFSF.SkipInsts.insert(ExVal);
-      ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
-      DFSF.SkipInsts.insert(ExShadow);
-      DFSF.setShadow(ExVal, ExShadow);
-      DFSF.NonZeroChecks.push_back(ExShadow);
-
-      CB.replaceAllUsesWith(ExVal);
+      LoadInst *LI = NextIRB.CreateAlignedLoad(
+          DFSF.DFS.getShadowTy(&CB), DFSF.getRetvalTLS(CB.getType(), NextIRB),
+          ShadowTLSAlignment, "_dfsret");
+      DFSF.SkipInsts.insert(LI);
+      DFSF.setShadow(&CB, LI);
+      DFSF.NonZeroChecks.push_back(LI);
     }

-    CB.eraseFromParent();
+    if (ShouldTrackOrigins) {
+      LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.OriginTy,
+                                        DFSF.getRetvalOriginTLS(), "_dfsret_o");
+      DFSF.SkipInsts.insert(LI);
+      DFSF.setOrigin(&CB, LI);
+    }
   }
 }
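Not part of the patch: with IA_Args gone, WK_Custom is the only special call treatment left, and, as the WK_Custom comment above notes, its wrapped form can be expressed directly in C. A sketch for the test's custom2 function follows; the __dfsw_ name and argument order mirror the __dfsw_custom2 declaration checked in abilist.ll below, and dfsan_label comes from <sanitizer/dfsan_interface.h>.

#include <sanitizer/dfsan_interface.h>

int custom2(int a, int b); // original function, listed as "custom" in the ABI list

// Wrapper the instrumented caller invokes instead of custom2: one label per
// argument, plus a pointer through which the return-value label is reported.
int __dfsw_custom2(int a, int b, dfsan_label a_label, dfsan_label b_label,
                   dfsan_label *ret_label) {
  *ret_label = 0;        // e.g. report an untainted result
  return custom2(a, b);  // wrap or replace the original as needed
}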
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll b/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
+; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -16,26 +16,12 @@
   ret i32 %c
 }

-; CHECK: define i32 (i32, i32)* @discardg(i32 %0)
-; CHECK: %[[CALL:.*]] = call { i32 (i32, i32)*, i[[#SBITS]] } @g.dfsan(i32 %0, i[[#SBITS]] 0)
-; CHECK: %[[XVAL:.*]] = extractvalue { i32 (i32, i32)*, i[[#SBITS]] } %[[CALL]], 0 -; CHECK: ret {{.*}} %[[XVAL]] @discardg = alias i32 (i32, i32)* (i32), i32 (i32, i32)* (i32)* @g declare void @custom1(i32 %a, i32 %b) -; CHECK: define linkonce_odr { i32, i[[#SBITS]] } @"dfsw$custom2"(i32 %0, i32 %1, i[[#SBITS]] %2, i[[#SBITS]] %3) -; CHECK: %[[LABELRETURN2:.*]] = alloca i[[#SBITS]] -; CHECK: %[[RV:.*]] = call i32 @__dfsw_custom2 -; CHECK: %[[RVSHADOW:.*]] = load i[[#SBITS]], i[[#SBITS]]* %[[LABELRETURN2]] -; CHECK: insertvalue {{.*}}[[RV]], 0 -; CHECK: insertvalue {{.*}}[[RVSHADOW]], 1 -; CHECK: ret { i32, i[[#SBITS]] } declare i32 @custom2(i32 %a, i32 %b) -; CHECK: define linkonce_odr void @"dfsw$custom3"(i32 %0, i[[#SBITS]] %1, i[[#SBITS]]* %2, ...) -; CHECK: call void @__dfsan_vararg_wrapper(i8* -; CHECK: unreachable declare void @custom3(i32 %a, ...) declare i32 @custom4(i32 %a, ...) @@ -81,24 +67,38 @@ ret i32 (i32, i32)* @custom2 } -; CHECK: define { i32, i[[#SBITS]] } @adiscard.dfsan(i32 %0, i32 %1, i[[#SBITS]] %2, i[[#SBITS]] %3) +; CHECK: define i32 (i32, i32)* @discardg(i32 %0) +; CHECK: %[[CALL:.*]] = call i32 (i32, i32)* @g.dfsan(i32 %0) +; CHECK: load {{.*}} @__dfsan_retval_tls +; CHECK: ret {{.*}} + +; CHECK: define i32 @adiscard.dfsan(i32 %0, i32 %1) ; CHECK: %[[CALL:.*]] = call i32 @discard(i32 %0, i32 %1) -; CHECK: %[[IVAL0:.*]] = insertvalue { i32, i[[#SBITS]] } undef, i32 %[[CALL]], 0 -; CHECK: %[[IVAL1:.*]] = insertvalue { i32, i[[#SBITS]] } %[[IVAL0]], i[[#SBITS]] 0, 1 -; CHECK: ret { i32, i[[#SBITS]] } %[[IVAL1]] +; CHECK: ret i32 @adiscard = alias i32 (i32, i32), i32 (i32, i32)* @discard +; CHECK: define linkonce_odr i32 @"dfsw$custom2"(i32 %0, i32 %1) +; CHECK: %[[LABELRETURN2:.*]] = alloca i[[#SBITS]] +; CHECK: %[[RV:.*]] = call i32 @__dfsw_custom2(i32 {{.*}}, i32 {{.*}}, i[[#SBITS]] {{.*}}, i[[#SBITS]] {{.*}}, i[[#SBITS]]* %[[LABELRETURN2]]) +; CHECK: %[[RVSHADOW:.*]] = load i[[#SBITS]], i[[#SBITS]]* %[[LABELRETURN2]] +; CHECK: store {{.*}} @__dfsan_retval_tls +; CHECK: ret i32 + +; CHECK: define linkonce_odr void @"dfsw$custom3"(i32 %0, ...) +; CHECK: call void @__dfsan_vararg_wrapper(i8* +; CHECK: unreachable + +; CHECK: define linkonce_odr i32 @"dfsw$custom4"(i32 %0, ...) + ; CHECK: declare void @__dfsw_custom1(i32, i32, i[[#SBITS]], i[[#SBITS]]) ; CHECK: declare i32 @__dfsw_custom2(i32, i32, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*) ; CHECK-LABEL: define linkonce_odr i32 @"dfst0$customcb" ; CHECK-SAME: (i32 (i32)* %0, i32 %1, i[[#SBITS]] %2, i[[#SBITS]]* %3) -; CHECK: %[[BC:.*]] = bitcast i32 (i32)* %0 to { i32, i[[#SBITS]] } (i32, i[[#SBITS]])* -; CHECK: %[[CALL:.*]] = call { i32, i[[#SBITS]] } %[[BC]](i32 %1, i[[#SBITS]] %2) -; CHECK: %[[XVAL0:.*]] = extractvalue { i32, i[[#SBITS]] } %[[CALL]], 0 -; CHECK: %[[XVAL1:.*]] = extractvalue { i32, i[[#SBITS]] } %[[CALL]], 1 -; CHECK: store i[[#SBITS]] %[[XVAL1]], i[[#SBITS]]* %3 -; CHECK: ret i32 %[[XVAL0]] +; CHECK: %[[CALL:.*]] = call i32 %0(i32 %1) +; CHECK: %[[RVSHADOW2:.*]] = load i[[#SBITS]], {{.*}} @__dfsan_retval_tls +; CHECK: store i[[#SBITS]] %[[RVSHADOW2]], i[[#SBITS]]* %3 +; CHECK: ret i32 %[[CALL]] ; CHECK: declare void @__dfsw_custom3(i32, i[[#SBITS]], i[[#SBITS]]*, ...) ; CHECK: declare i32 @__dfsw_custom4(i32, i[[#SBITS]], i[[#SBITS]]*, i[[#SBITS]]*, ...) 
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll b/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll --- a/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll +++ b/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll @@ -1,13 +1,11 @@ -; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s --check-prefixes=CHECK,TLS_ABI -; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI +; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" ; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]] ; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]] -; TLS_ABI: define { i1, i7 } @functional({ i32, i1 } %a, [2 x i7] %b) -; ARGS_ABI: define { i1, i7 } @functional({ i32, i1 } %a, [2 x i7] %b) +; CHECK: define { i1, i7 } @functional({ i32, i1 } %a, [2 x i7] %b) define {i1, i7} @functional({i32, i1} %a, [2 x i7] %b) { %a1 = extractvalue {i32, i1} %a, 1 %b0 = extractvalue [2 x i7] %b, 0 @@ -17,32 +15,25 @@ } define {i1, i7} @call_functional({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI-LABEL: @call_functional.dfsan - ; TLS_ABI-NEXT: %[[#REG:]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] - ; TLS_ABI-NEXT: %[[#REG+1]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - ; TLS_ABI-NEXT: %[[#REG+2]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+1]], 0 - ; TLS_ABI-NEXT: %[[#REG+3]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+1]], 1 - ; TLS_ABI-NEXT: %[[#REG+4]] = or i[[#SBITS]] %[[#REG+2]], %[[#REG+3]] - ; TLS_ABI-NEXT: %[[#REG+5]] = extractvalue [2 x i[[#SBITS]]] %[[#REG]], 0 - ; TLS_ABI-NEXT: %[[#REG+6]] = extractvalue [2 x i[[#SBITS]]] %[[#REG]], 1 - ; TLS_ABI-NEXT: %[[#REG+7]] = or i[[#SBITS]] %[[#REG+5]], %[[#REG+6]] - ; TLS_ABI-NEXT: %[[#REG+8]] = or i[[#SBITS]] %[[#REG+4]], %[[#REG+7]] - ; TLS_ABI-NEXT: %[[#REG+9]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] %[[#REG+8]], 0 - ; TLS_ABI-NEXT: %[[#REG+10]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+9]], i[[#SBITS]] %[[#REG+8]], 1 - ; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } %[[#REG+10]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - - ; ARGS_ABI: @call_functional.dfsan({ i32, i1 } %0, [2 x i7] %1, i[[#SBITS]] %2, i[[#SBITS]] %3) - ; ARGS_ABI: %[[#U:]] = or i[[#SBITS]] %2, %3 - ; ARGS_ABI: %r = call { i1, i7 } @functional({ i32, i1 } %0, [2 x i7] %1) - ; ARGS_ABI: %[[#R:]] = insertvalue { { i1, i7 }, i[[#SBITS]] } undef, { i1, i7 } %r, 0 - ; ARGS_ABI: %[[#R+1]] = insertvalue { { i1, i7 }, i[[#SBITS]] } %[[#R]], i[[#SBITS]] %[[#U]], 1 - ; ARGS_ABI: ret { { i1, i7 }, i[[#SBITS]] } %[[#R+1]] + ; CHECK-LABEL: @call_functional.dfsan + ; CHECK-NEXT: %[[#REG:]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] + ; CHECK-NEXT: %[[#REG+1]] = load { 
i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK-NEXT: %[[#REG+2]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+1]], 0 + ; CHECK-NEXT: %[[#REG+3]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+1]], 1 + ; CHECK-NEXT: %[[#REG+4]] = or i[[#SBITS]] %[[#REG+2]], %[[#REG+3]] + ; CHECK-NEXT: %[[#REG+5]] = extractvalue [2 x i[[#SBITS]]] %[[#REG]], 0 + ; CHECK-NEXT: %[[#REG+6]] = extractvalue [2 x i[[#SBITS]]] %[[#REG]], 1 + ; CHECK-NEXT: %[[#REG+7]] = or i[[#SBITS]] %[[#REG+5]], %[[#REG+6]] + ; CHECK-NEXT: %[[#REG+8]] = or i[[#SBITS]] %[[#REG+4]], %[[#REG+7]] + ; CHECK-NEXT: %[[#REG+9]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] %[[#REG+8]], 0 + ; CHECK-NEXT: %[[#REG+10]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+9]], i[[#SBITS]] %[[#REG+8]], 1 + ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#REG+10]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] %r = call {i1, i7} @functional({i32, i1} %a, [2 x i7] %b) ret {i1, i7} %r } -; TLS_ABI: define { i1, i7 } @discard({ i32, i1 } %a, [2 x i7] %b) +; CHECK: define { i1, i7 } @discard({ i32, i1 } %a, [2 x i7] %b) define {i1, i7} @discard({i32, i1} %a, [2 x i7] %b) { %a1 = extractvalue {i32, i1} %a, 1 %b0 = extractvalue [2 x i7] %b, 0 @@ -52,20 +43,14 @@ } define {i1, i7} @call_discard({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: @call_discard.dfsan - ; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align 2 - - ; ARGS_ABI: @call_discard.dfsan - ; ARGS_ABI: %r = call { i1, i7 } @discard({ i32, i1 } %0, [2 x i7] %1) - ; ARGS_ABI: [[R0:%.*]] = insertvalue { { i1, i7 }, i[[#SBITS]] } undef, { i1, i7 } %r, 0 - ; ARGS_ABI: [[R1:%.*]] = insertvalue { { i1, i7 }, i[[#SBITS]] } [[R0]], i[[#SBITS]] 0, 1 - ; ARGS_ABI: ret { { i1, i7 }, i[[#SBITS]] } [[R1]] + ; CHECK: @call_discard.dfsan + ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align 2 %r = call {i1, i7} @discard({i32, i1} %a, [2 x i7] %b) ret {i1, i7} %r } -; TLS_ABI: define { i1, i7 } @uninstrumented({ i32, i1 } %a, [2 x i7] %b) +; CHECK: define { i1, i7 } @uninstrumented({ i32, i1 } %a, [2 x i7] %b) define {i1, i7} @uninstrumented({i32, i1} %a, [2 x i7] %b) { %a1 = extractvalue {i32, i1} %a, 1 %b0 = extractvalue [2 x i7] %b, 0 @@ -75,116 +60,109 @@ } define {i1, i7} @call_uninstrumented({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: @call_uninstrumented.dfsan - ; TLS_ABI: call void @__dfsan_unimplemented - ; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align 2 - - ; ARGS_ABI: @call_uninstrumented.dfsan - ; ARGS_ABI: call void @__dfsan_unimplemented - ; ARGS_ABI: %r = call { i1, i7 } @uninstrumented({ i32, i1 } %0, [2 x i7] %1) - ; ARGS_ABI: [[R0:%.*]] = insertvalue { { i1, i7 }, i[[#SBITS]] } undef, { i1, i7 } %r, 0 - ; ARGS_ABI: [[R1:%.*]] = insertvalue { { i1, i7 }, i[[#SBITS]] } [[R0]], i[[#SBITS]] 0, 1 - ; ARGS_ABI: ret { { i1, i7 }, i[[#SBITS]] } [[R1]] + ; CHECK: @call_uninstrumented.dfsan + ; CHECK: call void @__dfsan_unimplemented + ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, { i[[#SBITS]], i[[#SBITS]] }* 
bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align 2 %r = call {i1, i7} @uninstrumented({i32, i1} %a, [2 x i7] %b) ret {i1, i7} %r } define {i1, i7} @call_custom_with_ret({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: @call_custom_with_ret.dfsan - ; TLS_ABI: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]] - ; TLS_ABI: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] - ; TLS_ABI: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - ; TLS_ABI: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 - ; TLS_ABI: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 - ; TLS_ABI: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] - ; TLS_ABI: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 - ; TLS_ABI: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 - ; TLS_ABI: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] - ; TLS_ABI: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn) - ; TLS_ABI: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]] - ; TLS_ABI: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0 - ; TLS_ABI: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1 - ; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - ; TLS_ABI: ret { i1, i7 } [[R]] + ; CHECK: @call_custom_with_ret.dfsan + ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]] + ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] + ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 + ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 + ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] + ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 + ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 + ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] + ; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn) + ; CHECK: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]] + ; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0 + ; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1 + ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK: ret { i1, i7 } [[R]] %r = call {i1, i7} @custom_with_ret({i32, i1} %a, [2 x i7] %b) ret {i1, i7} %r } define void @call_custom_without_ret({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: @call_custom_without_ret.dfsan - ; TLS_ABI: 
[[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] - ; TLS_ABI: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - ; TLS_ABI: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 - ; TLS_ABI: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 - ; TLS_ABI: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] - ; TLS_ABI: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 - ; TLS_ABI: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 - ; TLS_ABI: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] - ; TLS_ABI: call void @__dfsw_custom_without_ret({ i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]]) + ; CHECK: @call_custom_without_ret.dfsan + ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] + ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 + ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 + ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] + ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 + ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 + ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] + ; CHECK: call void @__dfsw_custom_without_ret({ i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]]) call void @custom_without_ret({i32, i1} %a, [2 x i7] %b) ret void } define void @call_custom_varg({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: @call_custom_varg.dfsan - ; TLS_ABI: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] - ; TLS_ABI: %labelva = alloca [1 x i[[#SBITS]]], align [[#SBYTES]] - ; TLS_ABI: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - ; TLS_ABI: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 - ; TLS_ABI: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 - ; TLS_ABI: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] - ; TLS_ABI: [[V0:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], [1 x i[[#SBITS]]]* %labelva, i32 0, i32 0 - ; TLS_ABI: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 - ; TLS_ABI: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 - ; TLS_ABI: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] - ; TLS_ABI: store i[[#SBITS]] [[B01]], i[[#SBITS]]* [[V0]], align [[#SBYTES]] - ; TLS_ABI: [[V:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], [1 x i[[#SBITS]]]* %labelva, i32 0, i32 0 - ; TLS_ABI: call void ({ i32, i1 }, i[[#SBITS]], i[[#SBITS]]*, ...) 
@__dfsw_custom_varg({ i32, i1 } %a, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]]* [[V]], [2 x i7] %b) + ; CHECK: @call_custom_varg.dfsan + ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] + ; CHECK: %labelva = alloca [1 x i[[#SBITS]]], align [[#SBYTES]] + ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 + ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 + ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] + ; CHECK: [[V0:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], [1 x i[[#SBITS]]]* %labelva, i32 0, i32 0 + ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 + ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 + ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] + ; CHECK: store i[[#SBITS]] [[B01]], i[[#SBITS]]* [[V0]], align [[#SBYTES]] + ; CHECK: [[V:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], [1 x i[[#SBITS]]]* %labelva, i32 0, i32 0 + ; CHECK: call void ({ i32, i1 }, i[[#SBITS]], i[[#SBITS]]*, ...) @__dfsw_custom_varg({ i32, i1 } %a, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]]* [[V]], [2 x i7] %b) call void ({i32, i1}, ...) @custom_varg({i32, i1} %a, [2 x i7] %b) ret void } define {i1, i7} @call_custom_cb({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: define { i1, i7 } @call_custom_cb.dfsan({ i32, i1 } %a, [2 x i7] %b) { - ; TLS_ABI: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]] - ; TLS_ABI: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] - ; TLS_ABI: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - ; TLS_ABI: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 - ; TLS_ABI: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 - ; TLS_ABI: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] - ; TLS_ABI: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 - ; TLS_ABI: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 - ; TLS_ABI: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] - ; TLS_ABI: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_cb({ i1, i7 } ({ i1, i7 } ({ i32, i1 }, [2 x i7])*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)* @"dfst0$custom_cb", i8* bitcast ({ i1, i7 } ({ i32, i1 }, [2 x i7])* @cb.dfsan to i8*), { i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext 0, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn) - ; TLS_ABI: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]] - ; TLS_ABI: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0 - ; TLS_ABI: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1 - ; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK: define { i1, i7 } @call_custom_cb.dfsan({ i32, i1 } %a, [2 x i7] %b) { + ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]] + ; CHECK: [[B:%.*]] = load [2 x 
i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] + ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 + ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 + ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] + ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 + ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 + ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] + ; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_cb({ i1, i7 } ({ i1, i7 } ({ i32, i1 }, [2 x i7])*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)* @"dfst0$custom_cb", i8* bitcast ({ i1, i7 } ({ i32, i1 }, [2 x i7])* @cb.dfsan to i8*), { i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext 0, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn) + ; CHECK: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]] + ; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0 + ; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1 + ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] %r = call {i1, i7} @custom_cb({i1, i7} ({i32, i1}, [2 x i7])* @cb, {i32, i1} %a, [2 x i7] %b) ret {i1, i7} %r } define {i1, i7} @custom_cb({i1, i7} ({i32, i1}, [2 x i7])* %cb, {i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: define { i1, i7 } @custom_cb({ i1, i7 } ({ i32, i1 }, [2 x i7])* %cb, { i32, i1 } %a, [2 x i7] %b) + ; CHECK: define { i1, i7 } @custom_cb({ i1, i7 } ({ i32, i1 }, [2 x i7])* %cb, { i32, i1 } %a, [2 x i7] %b) %r = call {i1, i7} %cb({i32, i1} %a, [2 x i7] %b) ret {i1, i7} %r } define {i1, i7} @cb({i32, i1} %a, [2 x i7] %b) { - ; TLS_ABI: define { i1, i7 } @cb.dfsan({ i32, i1 } %a, [2 x i7] %b) - ; TLS_ABI: [[BL:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] - ; TLS_ABI: [[AL:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] - ; TLS_ABI: [[AL1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[AL]], 1 - ; TLS_ABI: [[BL0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[BL]], 0 - ; TLS_ABI: [[RL0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, i[[#SBITS]] [[AL1]], 0 - ; TLS_ABI: [[RL:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RL0]], i[[#SBITS]] [[BL0]], 1 - ; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } [[RL]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] + ; CHECK: define { i1, i7 } @cb.dfsan({ i32, i1 } %a, [2 x i7] %b) + ; CHECK: [[BL:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] + ; CHECK: [[AL:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] 
}*), align [[ALIGN]] + ; CHECK: [[AL1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[AL]], 1 + ; CHECK: [[BL0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[BL]], 0 + ; CHECK: [[RL0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, i[[#SBITS]] [[AL1]], 0 + ; CHECK: [[RL:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RL0]], i[[#SBITS]] [[BL0]], 1 + ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RL]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] %a1 = extractvalue {i32, i1} %a, 1 %b0 = extractvalue [2 x i7] %b, 0 @@ -194,51 +172,51 @@ } define {i1, i7} ({i32, i1}, [2 x i7])* @ret_custom() { - ; TLS_ABI: @ret_custom.dfsan - ; TLS_ABI: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2 - ; TLS_ABI: ret {{.*}} @"dfsw$custom_with_ret" + ; CHECK: @ret_custom.dfsan + ; CHECK: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2 + ; CHECK: ret {{.*}} @"dfsw$custom_with_ret" ret {i1, i7} ({i32, i1}, [2 x i7])* @custom_with_ret } -; TLS_ABI: define linkonce_odr { i1, i7 } @"dfsw$custom_cb"({ i1, i7 } ({ i32, i1 }, [2 x i7])* %0, { i32, i1 } %1, [2 x i7] %2) { -; TLS_ABI: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]] +; CHECK: define linkonce_odr { i1, i7 } @"dfsw$custom_cb"({ i1, i7 } ({ i32, i1 }, [2 x i7])* %0, { i32, i1 } %1, [2 x i7] %2) { +; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]] ; COMM: TODO simplify the expression [[#mul(2,SBYTES) + max(SBYTES,2)]] to ; COMM: [[#mul(3,SBYTES)]], if shadow-tls-alignment is updated to match shadow ; COMM: width bytes. -; TLS_ABI: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES) + max(SBYTES,2)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]] -; TLS_ABI: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] -; TLS_ABI: [[CB:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]] -; TLS_ABI: [[CAST:%.*]] = bitcast { i1, i7 } ({ i32, i1 }, [2 x i7])* %0 to i8* -; TLS_ABI: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0 -; TLS_ABI: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1 -; TLS_ABI: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]] -; TLS_ABI: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0 -; TLS_ABI: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1 -; TLS_ABI: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]] -; TLS_ABI: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_cb({ i1, i7 } ({ i1, i7 } ({ i32, i1 }, [2 x i7])*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)* @"dfst0$custom_cb", i8* [[CAST]], { i32, i1 } %1, [2 x i7] %2, i[[#SBITS]] zeroext [[CB]], i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn) -; TLS_ABI: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]] -; TLS_ABI: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0 -; TLS_ABI: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1 -; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]] +; 
CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES) + max(SBYTES,2)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]]
+; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
+; CHECK: [[CB:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+; CHECK: [[CAST:%.*]] = bitcast { i1, i7 } ({ i32, i1 }, [2 x i7])* %0 to i8*
+; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
+; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
+; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
+; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
+; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
+; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
+; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_cb({ i1, i7 } ({ i1, i7 } ({ i32, i1 }, [2 x i7])*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)* @"dfst0$custom_cb", i8* [[CAST]], { i32, i1 } %1, [2 x i7] %2, i[[#SBITS]] zeroext [[CB]], i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn)
+; CHECK: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]]
+; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0
+; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1
+; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
 define {i1, i7} @custom_with_ret({i32, i1} %a, [2 x i7] %b) {
- ; TLS_ABI: define linkonce_odr { i1, i7 } @"dfsw$custom_with_ret"({ i32, i1 } %0, [2 x i7] %1)
- ; TLS_ABI: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
- ; TLS_ABI: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]]
- ; TLS_ABI: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
- ; TLS_ABI: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
- ; TLS_ABI: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
- ; TLS_ABI: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
- ; TLS_ABI: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
- ; TLS_ABI: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
- ; TLS_ABI: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
- ; TLS_ABI: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %0, [2 x i7] %1, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn)
- ; TLS_ABI: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]]
- ; TLS_ABI: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0
- ; TLS_ABI: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1
- ; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
- ; TLS_ABI: ret { i1, i7 } [[R]]
+ ; CHECK: define linkonce_odr { i1, i7 } @"dfsw$custom_with_ret"({ i32, i1 } %0, [2 x i7] %1)
+ ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
+ ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]]
+ ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
+ ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
+ ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
+ ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
+ ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
+ ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
+ ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
+ ; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %0, [2 x i7] %1, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], i[[#SBITS]]* %labelreturn)
+ ; CHECK: [[RE:%.*]] = load i[[#SBITS]], i[[#SBITS]]* %labelreturn, align [[#SBYTES]]
+ ; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0
+ ; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1
+ ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
+ ; CHECK: ret { i1, i7 } [[R]]
 %a1 = extractvalue {i32, i1} %a, 1
 %b0 = extractvalue [2 x i7] %b, 0
 %r0 = insertvalue {i1, i7} undef, i1 %a1, 0
@@ -247,44 +225,44 @@
 }
 define void @custom_without_ret({i32, i1} %a, [2 x i7] %b) {
- ; TLS_ABI: define linkonce_odr void @"dfsw$custom_without_ret"({ i32, i1 } %0, [2 x i7] %1)
- ; TLS_ABI: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]]
- ; TLS_ABI: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
- ; TLS_ABI: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
- ; TLS_ABI: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
- ; TLS_ABI: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
- ; TLS_ABI: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
- ; TLS_ABI: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
- ; TLS_ABI: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
- ; TLS_ABI: call void @__dfsw_custom_without_ret({ i32, i1 } %0, [2 x i7] %1, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]])
- ; TLS_ABI: ret
+ ; CHECK: define linkonce_odr void @"dfsw$custom_without_ret"({ i32, i1 } %0, [2 x i7] %1)
+ ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN:2]]
+ ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
+ ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
+ ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
+ ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
+ ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
+ ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
+ ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
+ ; CHECK: call void @__dfsw_custom_without_ret({ i32, i1 } %0, [2 x i7] %1, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]])
+ ; CHECK: ret
 ret void
 }
 define void @custom_varg({i32, i1} %a, ...) {
- ; TLS_ABI: define linkonce_odr void @"dfsw$custom_varg"({ i32, i1 } %0, ...)
- ; TLS_ABI: call void @__dfsan_vararg_wrapper
- ; TLS_ABI: unreachable
+ ; CHECK: define linkonce_odr void @"dfsw$custom_varg"({ i32, i1 } %0, ...)
+ ; CHECK: call void @__dfsan_vararg_wrapper
+ ; CHECK: unreachable
 ret void
 }
-; TLS_ABI: declare { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
-; TLS_ABI: declare void @__dfsw_custom_without_ret({ i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]])
-; TLS_ABI: declare void @__dfsw_custom_varg({ i32, i1 }, i[[#SBITS]], i[[#SBITS]]*, ...)
-
-; TLS_ABI: declare { i1, i7 } @__dfsw_custom_cb({ i1, i7 } ({ i1, i7 } ({ i32, i1 }, [2 x i7])*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)*, i8*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
-
-; TLS_ABI: define linkonce_odr { i1, i7 } @"dfst0$custom_cb"({ i1, i7 } ({ i32, i1 }, [2 x i7])* %0, { i32, i1 } %1, [2 x i7] %2, i[[#SBITS]] %3, i[[#SBITS]] %4, i[[#SBITS]]* %5) {
-; TLS_ABI: [[A0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] %3, 0
-; TLS_ABI: [[A1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[A0]], i[[#SBITS]] %3, 1
-; TLS_ABI: [[B0:%.*]] = insertvalue [2 x i[[#SBITS]]] undef, i[[#SBITS]] %4, 0
-; TLS_ABI: [[B1:%.*]] = insertvalue [2 x i[[#SBITS]]] [[B0]], i[[#SBITS]] %4, 1
-; TLS_ABI: store { i[[#SBITS]], i[[#SBITS]] } [[A1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN:2]]
-; TLS_ABI: store [2 x i[[#SBITS]]] [[B1]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN]]
-; TLS_ABI: [[R:%.*]] = call { i1, i7 } %0({ i32, i1 } %1, [2 x i7] %2)
-; TLS_ABI: %_dfsret = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
-; TLS_ABI: [[RE0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %_dfsret, 0
-; TLS_ABI: [[RE1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %_dfsret, 1
-; TLS_ABI: [[RE01:%.*]] = or i[[#SBITS]] [[RE0]], [[RE1]]
-; TLS_ABI: store i[[#SBITS]] [[RE01]], i[[#SBITS]]* %5, align [[#SBYTES]]
-; TLS_ABI: ret { i1, i7 } [[R]]
+; CHECK: declare { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
+; CHECK: declare void @__dfsw_custom_without_ret({ i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]])
+; CHECK: declare void @__dfsw_custom_varg({ i32, i1 }, i[[#SBITS]], i[[#SBITS]]*, ...)
+
+; CHECK: declare { i1, i7 } @__dfsw_custom_cb({ i1, i7 } ({ i1, i7 } ({ i32, i1 }, [2 x i7])*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)*, i8*, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
+
+; CHECK: define linkonce_odr { i1, i7 } @"dfst0$custom_cb"({ i1, i7 } ({ i32, i1 }, [2 x i7])* %0, { i32, i1 } %1, [2 x i7] %2, i[[#SBITS]] %3, i[[#SBITS]] %4, i[[#SBITS]]* %5) {
+; CHECK: [[A0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] %3, 0
+; CHECK: [[A1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[A0]], i[[#SBITS]] %3, 1
+; CHECK: [[B0:%.*]] = insertvalue [2 x i[[#SBITS]]] undef, i[[#SBITS]] %4, 0
+; CHECK: [[B1:%.*]] = insertvalue [2 x i[[#SBITS]]] [[B0]], i[[#SBITS]] %4, 1
+; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[A1]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN:2]]
+; CHECK: store [2 x i[[#SBITS]]] [[B1]], [2 x i[[#SBITS]]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to [2 x i[[#SBITS]]]*), align [[ALIGN]]
+; CHECK: [[R:%.*]] = call { i1, i7 } %0({ i32, i1 } %1, [2 x i7] %2)
+; CHECK: %_dfsret = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
+; CHECK: [[RE0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %_dfsret, 0
+; CHECK: [[RE1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %_dfsret, 1
+; CHECK: [[RE01:%.*]] = or i[[#SBITS]] [[RE0]], [[RE1]]
+; CHECK: store i[[#SBITS]] [[RE01]], i[[#SBITS]]* %5, align [[#SBYTES]]
+; CHECK: ret { i1, i7 } [[R]]
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll b/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -dfsan -verify -dfsan-args-abi -S | FileCheck %s
+; RUN: opt < %s -dfsan -verify -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -7,7 +7,7 @@
 ; CHECK-LABEL: @unreachable_bb1.dfsan
 define i8 @unreachable_bb1() {
- ; CHECK: ret { i8, i[[#SBITS]] } { i8 1, i[[#SBITS]] 0 }
+ ; CHECK: ret i8 1
 ; CHECK-NOT: bb2:
 ; CHECK-NOT: bb3:
 ; CHECK-NOT: bb4:
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/array.ll b/llvm/test/Instrumentation/DataFlowSanitizer/array.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/array.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/array.ll
@@ -1,5 +1,4 @@
 ; RUN: opt < %s -dfsan -dfsan-event-callbacks=true -S | FileCheck %s --check-prefixes=CHECK,EVENT_CALLBACKS
-; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI
 ; RUN: opt < %s -dfsan -S | FileCheck %s --check-prefixes=CHECK,FAST
 ; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-load=false -S | FileCheck %s --check-prefixes=CHECK,NO_COMBINE_LOAD_PTR
 ; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-store=true -S | FileCheck %s --check-prefixes=CHECK,COMBINE_STORE_PTR
@@ -17,9 +16,6 @@
 ; NO_COMBINE_LOAD_PTR: %1 = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN:2]]
 ; NO_COMBINE_LOAD_PTR: store [4 x i[[#SBITS]]] %1, [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align [[ALIGN]]
- ; ARGS_ABI: @pass_array.dfsan
- ; ARGS_ABI: ret { [4 x i8], i[[#SBITS]] }
-
 ; DEBUG_NONZERO_LABELS: @pass_array.dfsan
 ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN:2]]
 ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 0
@@ -42,8 +38,6 @@
 ; NO_COMBINE_LOAD_PTR: %1 = load [4 x { i[[#SBITS]], i[[#SBITS]] }], [4 x { i[[#SBITS]], i[[#SBITS]] }]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x { i[[#SBITS]], i[[#SBITS]] }]*), align [[ALIGN:2]]
 ; NO_COMBINE_LOAD_PTR: store [4 x { i[[#SBITS]], i[[#SBITS]] }] %1, [4 x { i[[#SBITS]], i[[#SBITS]] }]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x { i[[#SBITS]], i[[#SBITS]] }]*), align [[ALIGN]]
- ; ARGS_ABI: @pass_array_of_struct.dfsan
- ; ARGS_ABI: ret { [4 x { i8*, i32 }], i[[#SBITS]] }
 ret %ArrayOfStruct %as
 }
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll
@@ -1,5 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
-; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
+; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 target triple = "x86_64-unknown-linux-gnu"
 ; Declare custom functions. Inputs/abilist.txt causes any function with a
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
@@ -1,5 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
-; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
+; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 target triple = "x86_64-unknown-linux-gnu"
 ; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll b/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-debug-nonzero-labels -S | FileCheck %s
+; RUN: opt < %s -dfsan -dfsan-debug-nonzero-labels -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -7,19 +7,21 @@
 declare i32 @g()
-; CHECK: define { i32, i[[#SBITS]] } @f.dfsan(i32 %0, i32 %1, i[[#SBITS]] %2, i[[#SBITS]] %3)
+; CHECK: define i32 @f.dfsan(i32 %0, i32 %1)
 define i32 @f(i32, i32) {
- ; CHECK: [[LOCALLABELALLOCA:%.*]] = alloca i[[#SBITS]]
+ ; CHECK: [[ARGLABEL1:%.*]] = load i[[#SBITS]], {{.*}} @__dfsan_arg_tls
 %i = alloca i32
- ; CHECK: [[ARGCMP1:%.*]] = icmp ne i[[#SBITS]] %3, 0
+ ; CHECK: [[ARGCMP1:%.*]] = icmp ne i[[#SBITS]] [[ARGLABEL1]], 0
 ; CHECK: br i1 [[ARGCMP1]]
- ; CHECK: [[ARGCMP2:%.*]] = icmp ne i[[#SBITS]] %2, 0
+ ; CHECK: [[ARGLABEL2:%.*]] = load i[[#SBITS]], {{.*}} @__dfsan_arg_tls
+ ; CHECK: [[LOCALLABELALLOCA:%.*]] = alloca i[[#SBITS]]
+ ; CHECK: [[ARGCMP2:%.*]] = icmp ne i[[#SBITS]] [[ARGLABEL2]], 0
 ; CHECK: br i1 [[ARGCMP2]]
 %x = add i32 %0, %1
 store i32 %x, i32* %i
- ; CHECK: [[CALL:%.*]] = call { i32, i[[#SBITS]] } @g.dfsan()
- ; CHECK: [[CALLLABEL:%.*]] = extractvalue { i32, i[[#SBITS]] } [[CALL]], 1
- ; CHECK: [[CALLCMP:%.*]] = icmp ne i[[#SBITS]] [[CALLLABEL]], 0
+ ; CHECK: [[CALL:%.*]] = call i32 @g.dfsan()
+ ; CHECK: [[RETLABEL:%.*]] = load i[[#SBITS]], {{.*}} @__dfsan_retval_tls
+ ; CHECK: [[CALLCMP:%.*]] = icmp ne i[[#SBITS]] [[RETLABEL]], 0
 ; CHECK: br i1 [[CALLCMP]]
 %call = call i32 @g()
 ; CHECK: [[LOCALLABEL:%.*]] = load i[[#SBITS]], i[[#SBITS]]* [[LOCALLABELALLOCA]]
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll b/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s
+; RUN: opt < %s -dfsan -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -9,8 +9,9 @@
 define void @ms(i8* %p, i8 %v) {
 ; CHECK-LABEL: @ms.dfsan
- ; CHECK-SAME: (i8* %0, i8 %1, i[[#SBITS]] %2, i[[#SBITS]] %3)
- ; CHECK: call void @__dfsan_set_label(i[[#SBITS]] %3, i32 0, i8* %0, i64 1)
+ ; CHECK-SAME: (i8* %p, i8 %v)
+ ; CHECK: %[[ARGLABEL:.*]] = load i[[#SBITS]], {{.*}} @__dfsan_arg_tls
+ ; CHECK: call void @__dfsan_set_label(i[[#SBITS]] %[[ARGLABEL]], i32 0, i8* %p, i64 1)
 call void @llvm.memset.p0i8.i64(i8* %p, i8 %v, i64 1, i1 1)
 ret void
 }
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll b/llvm/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll
@@ -1,5 +1,4 @@
 ; RUN: opt < %s -dfsan -S | FileCheck %s
-; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll b/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll
@@ -1,5 +1,4 @@
 ; RUN: opt < %s -dfsan -dfsan-event-callbacks=true -S | FileCheck %s --check-prefixes=CHECK,EVENT_CALLBACKS
-; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI
 ; RUN: opt < %s -dfsan -S | FileCheck %s --check-prefixes=CHECK,FAST
 ; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-load=false -S | FileCheck %s --check-prefixes=CHECK,NO_COMBINE_LOAD_PTR
 ; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-store=true -S | FileCheck %s --check-prefixes=CHECK,COMBINE_STORE_PTR
@@ -18,10 +17,6 @@
 ; NO_COMBINE_LOAD_PTR: [[L:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN:2]]
 ; NO_COMBINE_LOAD_PTR: store { i[[#SBITS]], i[[#SBITS]] } [[L]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
- ; ARGS_ABI: @pass_struct.dfsan
- ; ARGS_ABI-SAME: ({ i8*, i32 } {{%.*}}, i[[#SBITS]] {{%.*}})
- ; ARGS_ABI: ret { { i8*, i32 }, i[[#SBITS]] }
-
 ; DEBUG_NONZERO_LABELS: @pass_struct.dfsan
 ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN:2]]
 ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[L]], 0
@@ -41,8 +36,6 @@
 ; NO_COMBINE_LOAD_PTR: %1 = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }*), align [[ALIGN:2]]
 ; NO_COMBINE_LOAD_PTR: store { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } %1, { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }*), align [[ALIGN]]
- ; ARGS_ABI: @pass_struct_of_aggregate.dfsan
- ; ARGS_ABI: ret { %StructOfAggr, i[[#SBITS]] }
 ret %StructOfAggr %s
 }
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll b/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
@@ -1,5 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI
-; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s --check-prefixes=CHECK,TLS_ABI
+; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 target triple = "x86_64-unknown-linux-gnu"
 ; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
@@ -13,8 +12,7 @@
 %call = call i8 @uninstrumented_internal_fun(i8 %in)
 ret i8 %call
 }
-; TLS_ABI: define internal i8 @"dfsw$uninstrumented_internal_fun"
-; ARGS_ABI: define internal { i8, i[[#SBITS]] } @"dfsw$uninstrumented_internal_fun"
+; CHECK: define internal i8 @"dfsw$uninstrumented_internal_fun"
 define private i8 @uninstrumented_private_fun(i8 %in) {
 ret i8 %in
@@ -24,5 +22,4 @@
 %call = call i8 @uninstrumented_private_fun(i8 %in)
 ret i8 %call
 }
-; TLS_ABI: define private i8 @"dfsw$uninstrumented_private_fun"
-; ARGS_ABI: define private { i8, i[[#SBITS]] } @"dfsw$uninstrumented_private_fun"
+; CHECK: define private i8 @"dfsw$uninstrumented_private_fun"
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll b/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s
+; RUN: opt < %s -dfsan -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll b/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
@@ -1,5 +1,4 @@
-; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI
-; RUN: opt < %s -dfsan -S | FileCheck %s --check-prefixes=CHECK,TLS_ABI
+; RUN: opt < %s -dfsan -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -7,22 +6,16 @@
 ; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
 define <4 x i4> @pass_vector(<4 x i4> %v) {
- ; ARGS_ABI-LABEL: @pass_vector.dfsan
- ; ARGS_ABI-SAME: (<4 x i4> %[[VEC:.*]], i[[#SBITS]] %[[LABEL:.*]])
- ; ARGS_ABI-NEXT: %[[#REG:]] = insertvalue { <4 x i4>, i[[#SBITS]] } undef, <4 x i4> %[[VEC]], 0
- ; ARGS_ABI-NEXT: %[[#REG+1]] = insertvalue { <4 x i4>, i[[#SBITS]] } %[[#REG]], i[[#SBITS]] %[[LABEL]], 1
- ; ARGS_ABI-NEXT: ret { <4 x i4>, i[[#SBITS]] }
-
- ; TLS_ABI-LABEL: @pass_vector.dfsan
- ; TLS_ABI-NEXT: %[[#REG:]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
- ; TLS_ABI-NEXT: store i[[#SBITS]] %[[#REG]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
- ; TLS_ABI-NEXT: ret <4 x i4> %v
+ ; CHECK-LABEL: @pass_vector.dfsan
+ ; CHECK-NEXT: %[[#REG:]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
+ ; CHECK-NEXT: store i[[#SBITS]] %[[#REG]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK-NEXT: ret <4 x i4> %v
 ret <4 x i4> %v
 }
 define void @load_update_store_vector(<4 x i4>* %p) {
- ; TLS_ABI-LABEL: @load_update_store_vector.dfsan
- ; TLS_ABI: {{.*}} = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+ ; CHECK-LABEL: @load_update_store_vector.dfsan
+ ; CHECK: {{.*}} = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
 %v = load <4 x i4>, <4 x i4>* %p
 %e2 = extractelement <4 x i4> %v, i32 2
@@ -32,35 +25,35 @@
 }
 define <4 x i1> @icmp_vector(<4 x i8> %a, <4 x i8> %b) {
- ; TLS_ABI-LABEL: @icmp_vector.dfsan
- ; TLS_ABI-NEXT: %[[B:.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN:2]]
- ; TLS_ABI-NEXT: %[[A:.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
- ; TLS_ABI: %[[L:.*]] = or i[[#SBITS]] %[[A]], %[[B]]
+ ; CHECK-LABEL: @icmp_vector.dfsan
+ ; CHECK-NEXT: %[[B:.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN:2]]
+ ; CHECK-NEXT: %[[A:.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK: %[[L:.*]] = or i[[#SBITS]] %[[A]], %[[B]]
- ; TLS_ABI: %r = icmp eq <4 x i8> %a, %b
- ; TLS_ABI: store i[[#SBITS]] %[[L]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
- ; TLS_ABI: ret <4 x i1> %r
+ ; CHECK: %r = icmp eq <4 x i8> %a, %b
+ ; CHECK: store i[[#SBITS]] %[[L]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK: ret <4 x i1> %r
 %r = icmp eq <4 x i8> %a, %b
 ret <4 x i1> %r
 }
 define <2 x i32> @const_vector() {
- ; TLS_ABI-LABEL: @const_vector.dfsan
- ; TLS_ABI-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2
- ; TLS_ABI-NEXT: ret <2 x i32>
+ ; CHECK-LABEL: @const_vector.dfsan
+ ; CHECK-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2
+ ; CHECK-NEXT: ret <2 x i32>
 ret <2 x i32> < i32 42, i32 11 >
 }
 define <4 x i4> @call_vector(<4 x i4> %v) {
- ; TLS_ABI-LABEL: @call_vector.dfsan
- ; TLS_ABI-NEXT: %[[V:.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
- ; TLS_ABI-NEXT: store i[[#SBITS]] %[[V]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
- ; TLS_ABI-NEXT: %r = call <4 x i4> @pass_vector.dfsan(<4 x i4> %v)
- ; TLS_ABI-NEXT: %_dfsret = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
- ; TLS_ABI-NEXT: store i[[#SBITS]] %_dfsret, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
- ; TLS_ABI-NEXT: ret <4 x i4> %r
+ ; CHECK-LABEL: @call_vector.dfsan
+ ; CHECK-NEXT: %[[V:.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
+ ; CHECK-NEXT: store i[[#SBITS]] %[[V]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK-NEXT: %r = call <4 x i4> @pass_vector.dfsan(<4 x i4> %v)
+ ; CHECK-NEXT: %_dfsret = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK-NEXT: store i[[#SBITS]] %_dfsret, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK-NEXT: ret <4 x i4> %r
 %r = call <4 x i4> @pass_vector(<4 x i4> %v)
 ret <4 x i4> %r