Index: llvm/lib/IR/Verifier.cpp
===================================================================
--- llvm/lib/IR/Verifier.cpp
+++ llvm/lib/IR/Verifier.cpp
@@ -65,7 +65,6 @@
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/Comdat.h"
 #include "llvm/IR/Constant.h"
@@ -140,21 +139,20 @@
   }

   void Write(const Value *V) {
-    if (!V)
-      return;
+    if (V)
+      Write(*V);
+  }
+
+  void Write(const Value &V) {
     if (isa<Instruction>(V)) {
-      V->print(*OS, MST);
+      V.print(*OS, MST);
       *OS << '\n';
     } else {
-      V->printAsOperand(*OS, true, MST);
+      V.printAsOperand(*OS, true, MST);
       *OS << '\n';
     }
   }

-  void Write(ImmutableCallSite CS) {
-    Write(CS.getInstruction());
-  }
-
   void Write(const Metadata *MD) {
     if (!MD)
       return;
@@ -471,7 +469,7 @@
   void visitSelectInst(SelectInst &SI);
   void visitUserOp1(Instruction &I);
   void visitUserOp2(Instruction &I) { visitUserOp1(I); }
-  void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
+  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
   void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
   void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
   void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
@@ -491,8 +489,8 @@
   void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
   void visitCleanupReturnInst(CleanupReturnInst &CRI);

-  void verifyCallSite(CallSite CS);
-  void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
+  void verifyCallBase(CallBase &Call);
+  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
   void verifySwiftErrorValue(const Value *SwiftErrorVal);
   void verifyMustTailCall(CallInst &CI);
   bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
@@ -507,7 +505,7 @@
   void visitConstantExprsRecursively(const Constant *EntryC);
   void visitConstantExpr(const ConstantExpr *CE);

-  void verifyStatepoint(ImmutableCallSite CS);
+  void verifyStatepoint(const CallBase &Call);
   void verifyFrameRecoverIndices();
   void verifySiblingFuncletUnwinds();
@@ -1893,134 +1891,136 @@
 }

 /// Verify that statepoint intrinsic is well formed.
-void Verifier::verifyStatepoint(ImmutableCallSite CS) {
-  assert(CS.getCalledFunction() &&
-         CS.getCalledFunction()->getIntrinsicID() ==
-             Intrinsic::experimental_gc_statepoint);
-
-  const Instruction &CI = *CS.getInstruction();
+void Verifier::verifyStatepoint(const CallBase &Call) {
+  assert(Call.getCalledFunction() &&
+         Call.getCalledFunction()->getIntrinsicID() ==
+             Intrinsic::experimental_gc_statepoint);

-  Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
-             !CS.onlyAccessesArgMemory(),
+  Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
+             !Call.onlyAccessesArgMemory(),
          "gc.statepoint must read and write all memory to preserve "
          "reordering restrictions required by safepoint semantics",
-         &CI);
+         Call);

-  const Value *IDV = CS.getArgument(0);
+  const Value *IDV = Call.getArgOperand(0);
   Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
-         &CI);
+         Call);

-  const Value *NumPatchBytesV = CS.getArgument(1);
+  const Value *NumPatchBytesV = Call.getArgOperand(1);
   Assert(isa<ConstantInt>(NumPatchBytesV),
          "gc.statepoint number of patchable bytes must be a constant integer",
-         &CI);
+         Call);
   const int64_t NumPatchBytes =
       cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
   assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
-  Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
-                             "positive",
-         &CI);
+  Assert(NumPatchBytes >= 0,
+         "gc.statepoint number of patchable bytes must be "
+         "positive",
+         Call);

-  const Value *Target = CS.getArgument(2);
+  const Value *Target = Call.getArgOperand(2);
   auto *PT = dyn_cast<PointerType>(Target->getType());
   Assert(PT && PT->getElementType()->isFunctionTy(),
-         "gc.statepoint callee must be of function pointer type", &CI, Target);
+         "gc.statepoint callee must be of function pointer type", Call, Target);
   FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());

-  const Value *NumCallArgsV = CS.getArgument(3);
+  const Value *NumCallArgsV = Call.getArgOperand(3);
   Assert(isa<ConstantInt>(NumCallArgsV),
          "gc.statepoint number of arguments to underlying call "
          "must be constant integer",
-         &CI);
+         Call);
   const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
   Assert(NumCallArgs >= 0,
          "gc.statepoint number of arguments to underlying call "
          "must be positive",
-         &CI);
+         Call);

   const int NumParams = (int)TargetFuncType->getNumParams();
   if (TargetFuncType->isVarArg()) {
     Assert(NumCallArgs >= NumParams,
-           "gc.statepoint mismatch in number of vararg call args", &CI);
+           "gc.statepoint mismatch in number of vararg call args", Call);

     // TODO: Remove this limitation
     Assert(TargetFuncType->getReturnType()->isVoidTy(),
            "gc.statepoint doesn't support wrapping non-void "
            "vararg functions yet",
-           &CI);
+           Call);
   } else
     Assert(NumCallArgs == NumParams,
-           "gc.statepoint mismatch in number of call args", &CI);
+           "gc.statepoint mismatch in number of call args", Call);

-  const Value *FlagsV = CS.getArgument(4);
+  const Value *FlagsV = Call.getArgOperand(4);
   Assert(isa<ConstantInt>(FlagsV),
-         "gc.statepoint flags must be constant integer", &CI);
+         "gc.statepoint flags must be constant integer", Call);
   const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
   Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
-         "unknown flag used in gc.statepoint flags argument", &CI);
+         "unknown flag used in gc.statepoint flags argument", Call);

   // Verify that the types of the call parameter arguments match
   // the type of the wrapped callee.
-  AttributeList Attrs = CS.getAttributes();
+  AttributeList Attrs = Call.getAttributes();
   for (int i = 0; i < NumParams; i++) {
     Type *ParamType = TargetFuncType->getParamType(i);
-    Type *ArgType = CS.getArgument(5 + i)->getType();
+    Type *ArgType = Call.getArgOperand(5 + i)->getType();
     Assert(ArgType == ParamType,
            "gc.statepoint call argument does not match wrapped "
            "function type",
-           &CI);
+           Call);

     if (TargetFuncType->isVarArg()) {
       AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
       Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
-             "Attribute 'sret' cannot be used for vararg call arguments!", &CI);
+             "Attribute 'sret' cannot be used for vararg call arguments!",
+             Call);
     }
   }

   const int EndCallArgsInx = 4 + NumCallArgs;

-  const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
+  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
   Assert(isa<ConstantInt>(NumTransitionArgsV),
          "gc.statepoint number of transition arguments "
          "must be constant integer",
-         &CI);
+         Call);
   const int NumTransitionArgs =
       cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
   Assert(NumTransitionArgs >= 0,
-         "gc.statepoint number of transition arguments must be positive", &CI);
+         "gc.statepoint number of transition arguments must be positive", Call);
   const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

-  const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
+  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
   Assert(isa<ConstantInt>(NumDeoptArgsV),
          "gc.statepoint number of deoptimization arguments "
          "must be constant integer",
-         &CI);
+         Call);
   const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
-  Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
-                            "must be positive",
-         &CI);
+  Assert(NumDeoptArgs >= 0,
+         "gc.statepoint number of deoptimization arguments "
+         "must be positive",
+         Call);

   const int ExpectedNumArgs =
       7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
-  Assert(ExpectedNumArgs <= (int)CS.arg_size(),
-         "gc.statepoint too few arguments according to length fields", &CI);
+  Assert(ExpectedNumArgs <= (int)Call.arg_size(),
+         "gc.statepoint too few arguments according to length fields", Call);

   // Check that the only uses of this gc.statepoint are gc.result or
   // gc.relocate calls which are tied to this statepoint and thus part
   // of the same statepoint sequence
-  for (const User *U : CI.users()) {
-    const CallInst *Call = dyn_cast<const CallInst>(U);
-    Assert(Call, "illegal use of statepoint token", &CI, U);
-    if (!Call) continue;
-    Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
+  for (const User *U : Call.users()) {
+    const CallInst *UserCall = dyn_cast<const CallInst>(U);
+    Assert(UserCall, "illegal use of statepoint token", Call, U);
+    if (!UserCall)
+      continue;
+    Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
           "gc.result or gc.relocate are the only value uses "
           "of a gc.statepoint",
-           &CI, U);
-    if (isa<GCResultInst>(Call)) {
-      Assert(Call->getArgOperand(0) == &CI,
-             "gc.result connected to wrong gc.statepoint", &CI, Call);
-    } else if (isa<GCRelocateInst>(Call)) {
-      Assert(Call->getArgOperand(0) == &CI,
-             "gc.relocate connected to wrong gc.statepoint", &CI, Call);
+           Call, U);
+    if (isa<GCResultInst>(UserCall)) {
+      Assert(UserCall->getArgOperand(0) == &Call,
+             "gc.result connected to wrong gc.statepoint", Call, UserCall);
+    } else if (isa<GCRelocateInst>(UserCall)) {
+      Assert(UserCall->getArgOperand(0) == &Call,
+             "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
     }
   }
@@ -2745,77 +2745,79 @@
   visitInstruction(PN);
 }

-void Verifier::verifyCallSite(CallSite CS) {
-  Instruction *I = CS.getInstruction();
-
-  Assert(CS.getCalledValue()->getType()->isPointerTy(),
-         "Called function must be a pointer!", I);
-  PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
+void Verifier::verifyCallBase(CallBase &Call) {
+  Assert(Call.getCalledValue()->getType()->isPointerTy(),
+         "Called function must be a pointer!", Call);
+  PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
   Assert(FPTy->getElementType()->isFunctionTy(),
-         "Called function is not pointer to function type!", I);
+         "Called function is not pointer to function type!", Call);

-  Assert(FPTy->getElementType() == CS.getFunctionType(),
-         "Called function is not the same type as the call!", I);
+  Assert(FPTy->getElementType() == Call.getFunctionType(),
+         "Called function is not the same type as the call!", Call);

-  FunctionType *FTy = CS.getFunctionType();
+  FunctionType *FTy = Call.getFunctionType();

   // Verify that the correct number of arguments are being passed
   if (FTy->isVarArg())
-    Assert(CS.arg_size() >= FTy->getNumParams(),
-           "Called function requires more parameters than were provided!", I);
+    Assert(Call.arg_size() >= FTy->getNumParams(),
+           "Called function requires more parameters than were provided!",
+           Call);
   else
-    Assert(CS.arg_size() == FTy->getNumParams(),
-           "Incorrect number of arguments passed to called function!", I);
+    Assert(Call.arg_size() == FTy->getNumParams(),
+           "Incorrect number of arguments passed to called function!", Call);

   // Verify that all arguments to the call match the function type.
   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
-    Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
+    Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
            "Call parameter type does not match function signature!",
-           CS.getArgument(i), FTy->getParamType(i), I);
+           Call.getArgOperand(i), FTy->getParamType(i), Call);

-  AttributeList Attrs = CS.getAttributes();
+  AttributeList Attrs = Call.getAttributes();

-  Assert(verifyAttributeCount(Attrs, CS.arg_size()),
-         "Attribute after last parameter!", I);
+  Assert(verifyAttributeCount(Attrs, Call.arg_size()),
+         "Attribute after last parameter!", Call);

   if (Attrs.hasAttribute(AttributeList::FunctionIndex,
                          Attribute::Speculatable)) {
     // Don't allow speculatable on call sites, unless the underlying function
     // declaration is also speculatable.
-    Function *Callee
-      = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+    Function *Callee =
+        dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
     Assert(Callee && Callee->isSpeculatable(),
-           "speculatable attribute may not apply to call sites", I);
+           "speculatable attribute may not apply to call sites", Call);
   }

   // Verify call attributes.
-  verifyFunctionAttrs(FTy, Attrs, I);
+  verifyFunctionAttrs(FTy, Attrs, &Call);

   // Conservatively check the inalloca argument.
   // We have a bug if we can find that there is an underlying alloca without
   // inalloca.
-  if (CS.hasInAllocaArgument()) {
-    Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
+  if (Call.hasInAllocaArgument()) {
+    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
     if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
       Assert(AI->isUsedWithInAlloca(),
-             "inalloca argument for call has mismatched alloca", AI, I);
+             "inalloca argument for call has mismatched alloca", AI, Call);
   }

   // For each argument of the callsite, if it has the swifterror argument,
   // make sure the underlying alloca/parameter it comes from has a swifterror as
   // well.
   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
-    if (CS.paramHasAttr(i, Attribute::SwiftError)) {
-      Value *SwiftErrorArg = CS.getArgument(i);
+    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
+      Value *SwiftErrorArg = Call.getArgOperand(i);
       if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
         Assert(AI->isSwiftError(),
-               "swifterror argument for call has mismatched alloca", AI, I);
+               "swifterror argument for call has mismatched alloca", AI, Call);
         continue;
       }
       auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
-      Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I);
+      Assert(ArgI,
+             "swifterror argument should come from an alloca or parameter",
+             SwiftErrorArg, Call);
       Assert(ArgI->hasSwiftErrorAttr(),
-             "swifterror argument for call has mismatched parameter", ArgI, I);
+             "swifterror argument for call has mismatched parameter", ArgI,
+             Call);
     }

   if (FTy->isVarArg()) {
@@ -2831,95 +2833,97 @@
     }

     // Check attributes on the varargs part.
-    for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
-      Type *Ty = CS.getArgument(Idx)->getType();
+    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
+      Type *Ty = Call.getArgOperand(Idx)->getType();
       AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
-      verifyParameterAttrs(ArgAttrs, Ty, I);
+      verifyParameterAttrs(ArgAttrs, Ty, &Call);

       if (ArgAttrs.hasAttribute(Attribute::Nest)) {
-        Assert(!SawNest, "More than one parameter has attribute nest!", I);
+        Assert(!SawNest, "More than one parameter has attribute nest!", Call);
         SawNest = true;
       }

       if (ArgAttrs.hasAttribute(Attribute::Returned)) {
         Assert(!SawReturned, "More than one parameter has attribute returned!",
-               I);
+               Call);
         Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
                "Incompatible argument and return types for 'returned' "
                "attribute",
-               I);
+               Call);
         SawReturned = true;
       }

       // Statepoint intrinsic is vararg but the wrapped function may be not.
       // Allow sret here and check the wrapped function in verifyStatepoint.
-      if (CS.getCalledFunction() == nullptr ||
-          CS.getCalledFunction()->getIntrinsicID() !=
-            Intrinsic::experimental_gc_statepoint)
+      if (Call.getCalledFunction() == nullptr ||
+          Call.getCalledFunction()->getIntrinsicID() !=
+              Intrinsic::experimental_gc_statepoint)
         Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
-               "Attribute 'sret' cannot be used for vararg call arguments!", I);
+               "Attribute 'sret' cannot be used for vararg call arguments!",
+               Call);

       if (ArgAttrs.hasAttribute(Attribute::InAlloca))
-        Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
-               I);
+        Assert(Idx == Call.arg_size() - 1,
+               "inalloca isn't on the last argument!", Call);
     }
   }

   // Verify that there's no metadata unless it's a direct call to an intrinsic.
-  if (CS.getCalledFunction() == nullptr ||
-      !CS.getCalledFunction()->getName().startswith("llvm.")) {
+  if (Call.getCalledFunction() == nullptr ||
+      !Call.getCalledFunction()->getName().startswith("llvm.")) {
     for (Type *ParamTy : FTy->params()) {
       Assert(!ParamTy->isMetadataTy(),
-             "Function has metadata parameter but isn't an intrinsic", I);
+             "Function has metadata parameter but isn't an intrinsic", Call);
       Assert(!ParamTy->isTokenTy(),
-             "Function has token parameter but isn't an intrinsic", I);
+             "Function has token parameter but isn't an intrinsic", Call);
     }
   }

   // Verify that indirect calls don't return tokens.
-  if (CS.getCalledFunction() == nullptr)
+  if (Call.getCalledFunction() == nullptr)
     Assert(!FTy->getReturnType()->isTokenTy(),
            "Return type cannot be token for indirect call!");

-  if (Function *F = CS.getCalledFunction())
+  if (Function *F = Call.getCalledFunction())
     if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
-      visitIntrinsicCallSite(ID, CS);
+      visitIntrinsicCall(ID, Call);

   // Verify that a callsite has at most one "deopt", at most one "funclet" and
   // at most one "gc-transition" operand bundle.
   bool FoundDeoptBundle = false, FoundFuncletBundle = false,
        FoundGCTransitionBundle = false;
-  for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
-    OperandBundleUse BU = CS.getOperandBundleAt(i);
+  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
+    OperandBundleUse BU = Call.getOperandBundleAt(i);
     uint32_t Tag = BU.getTagID();
     if (Tag == LLVMContext::OB_deopt) {
-      Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
+      Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
       FoundDeoptBundle = true;
     } else if (Tag == LLVMContext::OB_gc_transition) {
       Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
-             I);
+             Call);
       FoundGCTransitionBundle = true;
     } else if (Tag == LLVMContext::OB_funclet) {
-      Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
+      Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
       FoundFuncletBundle = true;
       Assert(BU.Inputs.size() == 1,
-             "Expected exactly one funclet bundle operand", I);
+             "Expected exactly one funclet bundle operand", Call);
       Assert(isa<FuncletPadInst>(BU.Inputs.front()),
              "Funclet bundle operands should correspond to a FuncletPadInst",
-             I);
+             Call);
     }
   }

   // Verify that each inlinable callsite of a debug-info-bearing function in a
   // debug-info-bearing function has a debug location attached to it. Failure to
   // do so causes assertion failures when the inliner sets up inline scope info.
-  if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
-      CS.getCalledFunction()->getSubprogram())
-    AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
-                               "debug info must have a !dbg location",
-             I);
+  if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
+      Call.getCalledFunction()->getSubprogram())
+    AssertDI(Call.getDebugLoc(),
+             "inlinable function call in a function with "
+             "debug info must have a !dbg location",
+             Call);

-  visitInstruction(*I);
+  visitInstruction(Call);
 }

 /// Two types are "congruent" if they are identical, or if they are both pointer
@@ -3014,14 +3018,14 @@
 }

 void Verifier::visitCallInst(CallInst &CI) {
-  verifyCallSite(&CI);
+  verifyCallBase(CI);

   if (CI.isMustTailCall())
     verifyMustTailCall(CI);
 }

 void Verifier::visitInvokeInst(InvokeInst &II) {
-  verifyCallSite(&II);
+  verifyCallBase(II);

   // Verify that the first non-PHI instruction of the unwind destination is an
   // exception handling instruction.
@@ -3330,16 +3334,15 @@
 }

 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
-void Verifier::verifySwiftErrorCallSite(CallSite CS,
-                                        const Value *SwiftErrorVal) {
+void Verifier::verifySwiftErrorCall(CallBase &Call,
+                                    const Value *SwiftErrorVal) {
   unsigned Idx = 0;
-  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
-       I != E; ++I, ++Idx) {
+  for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
     if (*I == SwiftErrorVal) {
-      Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
+      Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
              "swifterror value when used in a callsite should be marked "
              "with swifterror attribute",
-             SwiftErrorVal, CS);
+             SwiftErrorVal, Call);
     }
   }
 }
@@ -3358,10 +3361,8 @@
     Assert(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores", SwiftErrorVal, U);
-    if (auto CallI = dyn_cast<CallInst>(U))
-      verifySwiftErrorCallSite(const_cast<CallInst *>(CallI), SwiftErrorVal);
-    if (auto II = dyn_cast<InvokeInst>(U))
-      verifySwiftErrorCallSite(const_cast<InvokeInst *>(II), SwiftErrorVal);
+    if (auto *Call = dyn_cast<CallBase>(U))
+      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
   }
 }
@@ -4078,8 +4079,8 @@
 }

 /// Allow intrinsics to be verified in different ways.
-void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
-  Function *IF = CS.getCalledFunction();
+void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
+  Function *IF = Call.getCalledFunction();
   Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
          IF);
@@ -4125,15 +4126,15 @@
   // If the intrinsic takes MDNode arguments, verify that they are either global
   // or are local to *this* function.
-  for (Value *V : CS.args())
+  for (Value *V : Call.args())
     if (auto *MD = dyn_cast<MetadataAsValue>(V))
-      visitMetadataAsValue(*MD, CS.getCaller());
+      visitMetadataAsValue(*MD, Call.getCaller());

   switch (ID) {
   default:
     break;
   case Intrinsic::coro_id: {
-    auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts();
+    auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
     if (isa<ConstantPointerNull>(InfoArg))
       break;
     auto *GV = dyn_cast<GlobalVariable>(InfoArg);
@@ -4148,10 +4149,10 @@
   }
   case Intrinsic::ctlz:  // llvm.ctlz
   case Intrinsic::cttz:  // llvm.cttz
-    Assert(isa<ConstantInt>(CS.getArgOperand(1)),
+    Assert(isa<ConstantInt>(Call.getArgOperand(1)),
            "is_zero_undef argument of bit counting intrinsics must be a "
            "constant int",
-           CS);
+           Call);
     break;
   case Intrinsic::experimental_constrained_fadd:
   case Intrinsic::experimental_constrained_fsub:
@@ -4177,59 +4178,58 @@
   case Intrinsic::experimental_constrained_floor:
   case Intrinsic::experimental_constrained_round:
   case Intrinsic::experimental_constrained_trunc:
-    visitConstrainedFPIntrinsic(
-        cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
+    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
     break;
   case Intrinsic::dbg_declare: // llvm.dbg.declare
-    Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
-           "invalid llvm.dbg.declare intrinsic call 1", CS);
-    visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+    Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
+           "invalid llvm.dbg.declare intrinsic call 1", Call);
+    visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
     break;
   case Intrinsic::dbg_addr: // llvm.dbg.addr
-    visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+    visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
     break;
   case Intrinsic::dbg_value: // llvm.dbg.value
-    visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+    visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
     break;
   case Intrinsic::dbg_label: // llvm.dbg.label
-    visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(*CS.getInstruction()));
+    visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
     break;
   case Intrinsic::memcpy:
   case Intrinsic::memmove:
   case Intrinsic::memset: {
-    const auto *MI = cast<MemIntrinsic>(CS.getInstruction());
+    const auto *MI = cast<MemIntrinsic>(&Call);
     auto IsValidAlignment = [&](unsigned Alignment) -> bool {
       return Alignment == 0 || isPowerOf2_32(Alignment);
     };
     Assert(IsValidAlignment(MI->getDestAlignment()),
            "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
-           CS);
+           Call);
     if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
       Assert(IsValidAlignment(MTI->getSourceAlignment()),
              "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
-             CS);
+             Call);
     }
-    Assert(isa<ConstantInt>(CS.getArgOperand(3)),
+    Assert(isa<ConstantInt>(Call.getArgOperand(3)),
            "isvolatile argument of memory intrinsics must be a constant int",
-           CS);
+           Call);
     break;
   }
   case Intrinsic::memcpy_element_unordered_atomic:
   case Intrinsic::memmove_element_unordered_atomic:
   case Intrinsic::memset_element_unordered_atomic: {
-    const auto *AMI = cast<AtomicMemIntrinsic>(CS.getInstruction());
+    const auto *AMI = cast<AtomicMemIntrinsic>(&Call);

     ConstantInt *ElementSizeCI =
         dyn_cast<ConstantInt>(AMI->getRawElementSizeInBytes());
     Assert(ElementSizeCI,
            "element size of the element-wise unordered atomic memory "
            "intrinsic must be a constant int",
-           CS);
+           Call);
     const APInt &ElementSizeVal = ElementSizeCI->getValue();
     Assert(ElementSizeVal.isPowerOf2(),
            "element size of the element-wise atomic memory intrinsic "
            "must be a power of 2",
-           CS);
+           Call);

     if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
       uint64_t Length = LengthCI->getZExtValue();
@@ -4237,7 +4237,7 @@
       Assert((Length % ElementSize) == 0,
              "constant length must be a multiple of the element size in the "
              "element-wise atomic memory intrinsic",
-             CS);
+             Call);
     }

     auto IsValidAlignment = [&](uint64_t Alignment) {
@@ -4245,11 +4245,11 @@
     };
     uint64_t DstAlignment = AMI->getDestAlignment();
     Assert(IsValidAlignment(DstAlignment),
-           "incorrect alignment of the destination argument", CS);
+           "incorrect alignment of the destination argument", Call);
     if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
       uint64_t SrcAlignment = AMT->getSourceAlignment();
       Assert(IsValidAlignment(SrcAlignment),
-             "incorrect alignment of the source argument", CS);
+             "incorrect alignment of the source argument", Call);
     }
     break;
   }
@@ -4258,76 +4258,76 @@
   case Intrinsic::gcwrite:
   case Intrinsic::gcread:
     if (ID == Intrinsic::gcroot) {
       AllocaInst *AI =
-          dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
-      Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
-      Assert(isa<Constant>(CS.getArgOperand(1)),
-             "llvm.gcroot parameter #2 must be a constant.", CS);
+          dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
+      Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
+      Assert(isa<Constant>(Call.getArgOperand(1)),
+             "llvm.gcroot parameter #2 must be a constant.", Call);
       if (!AI->getAllocatedType()->isPointerTy()) {
-        Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
+        Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
                "llvm.gcroot parameter #1 must either be a pointer alloca, "
                "or argument #2 must be a non-null constant.",
-               CS);
+               Call);
       }
     }

-    Assert(CS.getParent()->getParent()->hasGC(),
-           "Enclosing function does not use GC.", CS);
+    Assert(Call.getParent()->getParent()->hasGC(),
+           "Enclosing function does not use GC.", Call);
     break;
   case Intrinsic::init_trampoline:
-    Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
+    Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
            "llvm.init_trampoline parameter #2 must resolve to a function.",
-           CS);
+           Call);
     break;
   case Intrinsic::prefetch:
-    Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
-           isa<ConstantInt>(CS.getArgOperand(2)) &&
-           cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
-           cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
-           "invalid arguments to llvm.prefetch", CS);
+    Assert(isa<ConstantInt>(Call.getArgOperand(1)) &&
+               isa<ConstantInt>(Call.getArgOperand(2)) &&
+               cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
+               cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
+           "invalid arguments to llvm.prefetch", Call);
     break;
   case Intrinsic::stackprotector:
-    Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
-           "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
+    Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
+           "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
     break;
   case Intrinsic::lifetime_start:
   case Intrinsic::lifetime_end:
   case Intrinsic::invariant_start:
-    Assert(isa<ConstantInt>(CS.getArgOperand(0)),
+    Assert(isa<ConstantInt>(Call.getArgOperand(0)),
            "size argument of memory use markers must be a constant integer",
-           CS);
+           Call);
     break;
   case Intrinsic::invariant_end:
-    Assert(isa<ConstantInt>(CS.getArgOperand(1)),
-           "llvm.invariant.end parameter #2 must be a constant integer", CS);
+    Assert(isa<ConstantInt>(Call.getArgOperand(1)),
+           "llvm.invariant.end parameter #2 must be a constant integer", Call);
     break;

   case Intrinsic::localescape: {
-    BasicBlock *BB = CS.getParent();
+    BasicBlock *BB = Call.getParent();
     Assert(BB == &BB->getParent()->front(),
-           "llvm.localescape used outside of entry block", CS);
+           "llvm.localescape used outside of entry block", Call);
     Assert(!SawFrameEscape,
-           "multiple calls to llvm.localescape in one function", CS);
-    for (Value *Arg : CS.args()) {
+           "multiple calls to llvm.localescape in one function", Call);
+    for (Value *Arg : Call.args()) {
       if (isa<ConstantPointerNull>(Arg))
         continue; // Null values are allowed as placeholders.
       auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
       Assert(AI && AI->isStaticAlloca(),
-             "llvm.localescape only accepts static allocas", CS);
+             "llvm.localescape only accepts static allocas", Call);
     }
-    FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
+    FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
     SawFrameEscape = true;
     break;
   }
   case Intrinsic::localrecover: {
-    Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
+    Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
     Function *Fn = dyn_cast<Function>(FnArg);
     Assert(Fn && !Fn->isDeclaration(),
            "llvm.localrecover first "
            "argument must be function defined in this module",
-           CS);
-    auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+           Call);
+    auto *IdxArg = dyn_cast<ConstantInt>(Call.getArgOperand(2));
     Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
-           CS);
+           Call);
     auto &Entry = FrameEscapeInfo[Fn];
     Entry.second = unsigned(
         std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
@@ -4335,45 +4335,46 @@
   }

   case Intrinsic::experimental_gc_statepoint:
-    Assert(!CS.isInlineAsm(),
-           "gc.statepoint support for inline assembly unimplemented", CS);
-    Assert(CS.getParent()->getParent()->hasGC(),
-           "Enclosing function does not use GC.", CS);
+    if (auto *CI = dyn_cast<CallInst>(&Call))
+      Assert(!CI->isInlineAsm(),
+             "gc.statepoint support for inline assembly unimplemented", CI);
+    Assert(Call.getParent()->getParent()->hasGC(),
+           "Enclosing function does not use GC.", Call);

-    verifyStatepoint(CS);
+    verifyStatepoint(Call);
     break;
   case Intrinsic::experimental_gc_result: {
-    Assert(CS.getParent()->getParent()->hasGC(),
-           "Enclosing function does not use GC.", CS);
+    Assert(Call.getParent()->getParent()->hasGC(),
+           "Enclosing function does not use GC.", Call);
     // Are we tied to a statepoint properly?
-    CallSite StatepointCS(CS.getArgOperand(0));
+    const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
     const Function *StatepointFn =
-        StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
+        StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
     Assert(StatepointFn && StatepointFn->isDeclaration() &&
                StatepointFn->getIntrinsicID() ==
                    Intrinsic::experimental_gc_statepoint,
-           "gc.result operand #1 must be from a statepoint", CS,
-           CS.getArgOperand(0));
+           "gc.result operand #1 must be from a statepoint", Call,
+           Call.getArgOperand(0));

     // Assert that result type matches wrapped callee.
-    const Value *Target = StatepointCS.getArgument(2);
+    const Value *Target = StatepointCall->getArgOperand(2);
     auto *PT = cast<PointerType>(Target->getType());
     auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
-    Assert(CS.getType() == TargetFuncType->getReturnType(),
-           "gc.result result type does not match wrapped callee", CS);
+    Assert(Call.getType() == TargetFuncType->getReturnType(),
+           "gc.result result type does not match wrapped callee", Call);
     break;
   }
   case Intrinsic::experimental_gc_relocate: {
-    Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
+    Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);

-    Assert(isa<PointerType>(CS.getType()->getScalarType()),
-           "gc.relocate must return a pointer or a vector of pointers", CS);
+    Assert(isa<PointerType>(Call.getType()->getScalarType()),
+           "gc.relocate must return a pointer or a vector of pointers", Call);

     // Check that this relocate is correctly tied to the statepoint

     // This is case for relocate on the unwinding path of an invoke statepoint
     if (LandingPadInst *LandingPad =
-            dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
+            dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {

       const BasicBlock *InvokeBB =
           LandingPad->getParent()->getUniquePredecessor();
@@ -4386,161 +4387,160 @@
              InvokeBB);
       Assert(isStatepoint(InvokeBB->getTerminator()),
              "gc relocate should be linked to a statepoint", InvokeBB);
-    }
-    else {
+    } else {
       // In all other cases relocate should be tied to the statepoint directly.
       // This covers relocates on a normal return path of invoke statepoint and
       // relocates of a call statepoint.
-      auto Token = CS.getArgOperand(0);
+      auto Token = Call.getArgOperand(0);
       Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
-             "gc relocate is incorrectly tied to the statepoint", CS, Token);
+             "gc relocate is incorrectly tied to the statepoint", Call, Token);
     }

     // Verify rest of the relocate arguments.
-
-    ImmutableCallSite StatepointCS(
-        cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
+    const CallBase &StatepointCall =
+        *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());

     // Both the base and derived must be piped through the safepoint.
-    Value* Base = CS.getArgOperand(1);
+    Value *Base = Call.getArgOperand(1);
     Assert(isa<ConstantInt>(Base),
-           "gc.relocate operand #2 must be integer offset", CS);
+           "gc.relocate operand #2 must be integer offset", Call);

-    Value* Derived = CS.getArgOperand(2);
+    Value *Derived = Call.getArgOperand(2);
     Assert(isa<ConstantInt>(Derived),
-           "gc.relocate operand #3 must be integer offset", CS);
+           "gc.relocate operand #3 must be integer offset", Call);

     const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
     const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();

     // Check the bounds
-    Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
-           "gc.relocate: statepoint base index out of bounds", CS);
-    Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
-           "gc.relocate: statepoint derived index out of bounds", CS);
+    Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
+           "gc.relocate: statepoint base index out of bounds", Call);
+    Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
+           "gc.relocate: statepoint derived index out of bounds", Call);

     // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
     // section of the statepoint's argument.
-    Assert(StatepointCS.arg_size() > 0,
+    Assert(StatepointCall.arg_size() > 0,
            "gc.statepoint: insufficient arguments");
-    Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
+    Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
            "gc.statement: number of call arguments must be constant integer");
     const unsigned NumCallArgs =
-        cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
-    Assert(StatepointCS.arg_size() > NumCallArgs + 5,
+        cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
+    Assert(StatepointCall.arg_size() > NumCallArgs + 5,
            "gc.statepoint: mismatch in number of call arguments");
-    Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
+    Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
            "gc.statepoint: number of transition arguments must be "
            "a constant integer");
     const int NumTransitionArgs =
-        cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
+        cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
             ->getZExtValue();
     const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
-    Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
+    Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
            "gc.statepoint: number of deoptimization arguments must be "
            "a constant integer");
     const int NumDeoptArgs =
-        cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
+        cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
            ->getZExtValue();
     const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
-    const int GCParamArgsEnd = StatepointCS.arg_size();
+    const int GCParamArgsEnd = StatepointCall.arg_size();
     Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
            "gc.relocate: statepoint base index doesn't fall within the "
            "'gc parameters' section of the statepoint call",
-           CS);
+           Call);
     Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
            "gc.relocate: statepoint derived index doesn't fall within the "
            "'gc parameters' section of the statepoint call",
-           CS);
+           Call);

     // Relocated value must be either a pointer type or vector-of-pointer type,
     // but gc_relocate does not need to return the same pointer type as the
     // relocated pointer. It can be casted to the correct type later if it's
     // desired.  However, they must have the same address space and 'vectorness'
-    GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
+    GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
     Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
-           "gc.relocate: relocated value must be a gc pointer", CS);
+           "gc.relocate: relocated value must be a gc pointer", Call);

-    auto ResultType = CS.getType();
+    auto ResultType = Call.getType();
     auto DerivedType = Relocate.getDerivedPtr()->getType();
     Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
            "gc.relocate: vector relocates to vector and pointer to pointer",
-           CS);
+           Call);
     Assert(
         ResultType->getPointerAddressSpace() ==
             DerivedType->getPointerAddressSpace(),
         "gc.relocate: relocating a pointer shouldn't change its address space",
-        CS);
+        Call);
     break;
   }
   case Intrinsic::eh_exceptioncode:
   case Intrinsic::eh_exceptionpointer: {
-    Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
-           "eh.exceptionpointer argument must be a catchpad", CS);
+    Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
+           "eh.exceptionpointer argument must be a catchpad", Call);
     break;
   }
   case Intrinsic::masked_load: {
-    Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
+    Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
+           Call);

-    Value *Ptr = CS.getArgOperand(0);
-    //Value *Alignment = CS.getArgOperand(1);
-    Value *Mask = CS.getArgOperand(2);
-    Value *PassThru = CS.getArgOperand(3);
-    Assert(Mask->getType()->isVectorTy(),
-           "masked_load: mask must be vector", CS);
+    Value *Ptr = Call.getArgOperand(0);
+    // Value *Alignment = Call.getArgOperand(1);
+    Value *Mask = Call.getArgOperand(2);
+    Value *PassThru = Call.getArgOperand(3);
+    Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
+           Call);

     // DataTy is the overloaded type
     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
-    Assert(DataTy == CS.getType(),
-           "masked_load: return must match pointer type", CS);
+    Assert(DataTy == Call.getType(),
+           "masked_load: return must match pointer type", Call);
     Assert(PassThru->getType() == DataTy,
-           "masked_load: pass through and data type must match", CS);
+           "masked_load: pass through and data type must match", Call);
     Assert(Mask->getType()->getVectorNumElements() ==
-           DataTy->getVectorNumElements(),
-           "masked_load: vector mask must be same length as data", CS);
+               DataTy->getVectorNumElements(),
+           "masked_load: vector mask must be same length as data", Call);
     break;
   }
   case Intrinsic::masked_store: {
-    Value *Val = CS.getArgOperand(0);
-    Value *Ptr = CS.getArgOperand(1);
-    //Value *Alignment = CS.getArgOperand(2);
-    Value *Mask = CS.getArgOperand(3);
-    Assert(Mask->getType()->isVectorTy(),
-           "masked_store: mask must be vector", CS);
+    Value *Val = Call.getArgOperand(0);
+    Value *Ptr = Call.getArgOperand(1);
+    // Value *Alignment = Call.getArgOperand(2);
+    Value *Mask = Call.getArgOperand(3);
+    Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
+           Call);

     // DataTy is the overloaded type
     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
     Assert(DataTy == Val->getType(),
-           "masked_store: storee must match pointer type", CS);
+           "masked_store: storee must match pointer type", Call);
     Assert(Mask->getType()->getVectorNumElements() ==
-           DataTy->getVectorNumElements(),
-           "masked_store: vector mask must be same length as data", CS);
+               DataTy->getVectorNumElements(),
+           "masked_store: vector mask must be same length as data", Call);
     break;
   }

   case Intrinsic::experimental_guard: {
-    Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
-    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+    Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
+    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
            "experimental_guard must have exactly one "
            "\"deopt\" operand bundle");
     break;
   }

   case Intrinsic::experimental_deoptimize: {
-    Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
-    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+    Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
+           Call);
+    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
            "experimental_deoptimize must have exactly one "
            "\"deopt\" operand bundle");
-    Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
+    Assert(Call.getType() == Call.getFunction()->getReturnType(),
            "experimental_deoptimize return type must match caller return type");

-    if (CS.isCall()) {
-      auto *DeoptCI = CS.getInstruction();
-      auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
+    if (isa<CallInst>(Call)) {
+      auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
       Assert(RI, "calls to experimental_deoptimize must be followed by a return");

-      if (!CS.getType()->isVoidTy() && RI)
-        Assert(RI->getReturnValue() == DeoptCI,
+      if (!Call.getType()->isVoidTy() && RI)
+        Assert(RI->getReturnValue() == &Call,
               "calls to experimental_deoptimize must be followed by a return "
               "of the value computed by experimental_deoptimize");
     }
@@ -4551,8 +4551,8 @@
   case Intrinsic::uadd_sat:
   case Intrinsic::ssub_sat:
   case Intrinsic::usub_sat: {
-    Value *Op1 = CS.getArgOperand(0);
-    Value *Op2 = CS.getArgOperand(1);
+    Value *Op1 = Call.getArgOperand(0);
+    Value *Op2 = Call.getArgOperand(1);
     Assert(Op1->getType()->isIntOrIntVectorTy(),
            "first operand of [us][add|sub]_sat must be an int type or vector "
            "of ints");
@@ -4562,8 +4562,8 @@
     break;
   }
   case Intrinsic::smul_fix: {
-    Value *Op1 = CS.getArgOperand(0);
-    Value *Op2 = CS.getArgOperand(1);
+    Value *Op1 = Call.getArgOperand(0);
+    Value *Op2 = Call.getArgOperand(1);
     Assert(Op1->getType()->isIntOrIntVectorTy(),
            "first operand of smul_fix must be an int type or vector "
            "of ints");
@@ -4571,7 +4571,7 @@
            "second operand of smul_fix must be an int type or vector "
            "of ints");

-    auto *Op3 = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+    auto *Op3 = dyn_cast<ConstantInt>(Call.getArgOperand(2));
     Assert(Op3, "third argument of smul_fix must be a constant integer");
     Assert(Op3->getType()->getBitWidth() <= 32,
            "third argument of smul_fix must fit within 32 bits");