diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -2701,10 +2701,10 @@ bool doRetainAutorelease; - if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) { + if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { doRetainAutorelease = true; - } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints() - .objc_retainAutoreleasedReturnValue) { + } else if (call->getCalledOperand() == + CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) { doRetainAutorelease = false; // If we emitted an assembly marker for this call (and the @@ -2720,8 +2720,8 @@ assert(prev); } assert(isa(prev)); - assert(cast(prev)->getCalledValue() == - CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); + assert(cast(prev)->getCalledOperand() == + CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); InstsToKill.push_back(prev); } } else { @@ -2764,8 +2764,8 @@ // Look for a retain call. llvm::CallInst *retainCall = dyn_cast(result->stripPointerCasts()); - if (!retainCall || - retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) + if (!retainCall || retainCall->getCalledOperand() != + CGF.CGM.getObjCEntrypoints().objc_retain) return nullptr; // Look for an ordinary load of 'self'. diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp --- a/clang/lib/CodeGen/CGObjC.cpp +++ b/clang/lib/CodeGen/CGObjC.cpp @@ -2160,7 +2160,8 @@ if (!mandatory && isa(result)) { llvm::CallInst *call = cast(result->stripPointerCasts()); - assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock); + assert(call->getCalledOperand() == + CGM.getObjCEntrypoints().objc_retainBlock); call->setMetadata("clang.arc.copy_on_escape", llvm::MDNode::get(Builder.getContext(), None)); diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -2464,7 +2464,7 @@ llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption) { assert(Assumption && isa(Assumption) && - cast(Assumption)->getCalledValue() == + cast(Assumption)->getCalledOperand() == llvm::Intrinsic::getDeclaration( Builder.GetInsertBlock()->getParent()->getParent(), llvm::Intrinsic::assume) && diff --git a/lldb/source/Expression/IRInterpreter.cpp b/lldb/source/Expression/IRInterpreter.cpp --- a/lldb/source/Expression/IRInterpreter.cpp +++ b/lldb/source/Expression/IRInterpreter.cpp @@ -1371,7 +1371,7 @@ // Find the address of the callee function lldb_private::Scalar I; - const llvm::Value *val = call_inst->getCalledValue(); + const llvm::Value *val = call_inst->getCalledOperand(); if (!frame.EvaluateValue(I, val, module)) { error.SetErrorToGenericError(); diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp --- a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp @@ -465,7 +465,7 @@ } static llvm::Function *GetCalledFunction(llvm::CallInst *inst) { - return GetFunction(inst->getCalledValue()); + return GetFunction(inst->getCalledOperand()); } bool InspectInstruction(llvm::Instruction &i) override { diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp --- 
a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp @@ -1395,7 +1395,7 @@ if (func && func->getName() == "__cxa_atexit") remove = true; - llvm::Value *val = call->getCalledValue(); + llvm::Value *val = call->getCalledOperand(); if (val && val->getName() == "__cxa_atexit") remove = true; diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h --- a/llvm/include/llvm-c/Core.h +++ b/llvm/include/llvm-c/Core.h @@ -3252,8 +3252,8 @@ * This expects an LLVMValueRef that corresponds to a llvm::CallInst or * llvm::InvokeInst. * - * @see llvm::CallInst::getCalledValue() - * @see llvm::InvokeInst::getCalledValue() + * @see llvm::CallInst::getCalledOperand() + * @see llvm::InvokeInst::getCalledOperand() */ LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr); diff --git a/llvm/include/llvm/CodeGen/FastISel.h b/llvm/include/llvm/CodeGen/FastISel.h --- a/llvm/include/llvm/CodeGen/FastISel.h +++ b/llvm/include/llvm/CodeGen/FastISel.h @@ -127,7 +127,7 @@ const CallBase &Call, unsigned FixedArgs = ~0U) { RetTy = ResultTy; - Callee = Call.getCalledValue(); + Callee = Call.getCalledOperand(); Symbol = Target; IsInReg = Call.hasRetAttr(Attribute::InReg); diff --git a/llvm/include/llvm/IR/AbstractCallSite.h b/llvm/include/llvm/IR/AbstractCallSite.h --- a/llvm/include/llvm/IR/AbstractCallSite.h +++ b/llvm/include/llvm/IR/AbstractCallSite.h @@ -201,16 +201,16 @@ } /// Return the pointer to function that is being called. - Value *getCalledValue() const { + Value *getCalledOperand() const { if (isDirectCall()) - return CB->getCalledValue(); + return CB->getCalledOperand(); return CB->getArgOperand(getCallArgOperandNoForCallee()); } /// Return the function being called if this is a direct call, otherwise /// return null (if it's an indirect call). Function *getCalledFunction() const { - Value *V = getCalledValue(); + Value *V = getCalledOperand(); return V ? dyn_cast(V->stripPointerCasts()) : nullptr; } }; diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h --- a/llvm/include/llvm/IR/InstrTypes.h +++ b/llvm/include/llvm/IR/InstrTypes.h @@ -1286,10 +1286,6 @@ Value *getCalledOperand() const { return Op(); } - // DEPRECATED: This routine will be removed in favor of `getCalledOperand` in - // the near future. - Value *getCalledValue() const { return getCalledOperand(); } - const Use &getCalledOperandUse() const { return Op(); } Use &getCalledOperandUse() { return Op(); } diff --git a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp --- a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp +++ b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -114,7 +114,7 @@ Stores.insert(&*I); Instruction &Inst = *I; if (auto *Call = dyn_cast(&Inst)) { - Value *Callee = Call->getCalledValue(); + Value *Callee = Call->getCalledOperand(); // Skip actual functions for direct function calls. if (!isa(Callee) && isInterestingPointer(Callee)) Pointers.insert(Callee); diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -5407,7 +5407,7 @@ } Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) { - Value *Callee = Call->getCalledValue(); + Value *Callee = Call->getCalledOperand(); // musttail calls can only be simplified if they are also DCEd. // As we can't guarantee this here, don't simplify them. 
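(Note, not part of the patch: the mechanical core of this series is the rename of CallBase::getCalledValue() to getCalledOperand(); both return the raw callee operand, which may still be a bitcast rather than a Function. Below is a minimal C++ sketch of the pattern the updated call sites use, assuming LLVM-10-era headers; handleCallee is a hypothetical helper used only for illustration.)

#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/Casting.h"

// Hypothetical helper: resolve the callee the way many sites in this patch do.
static const llvm::Function *handleCallee(const llvm::CallBase &CB) {
  // Old spelling removed by this patch: CB.getCalledValue()
  const llvm::Value *Callee = CB.getCalledOperand();
  // Look through pointer casts, as several of the updated call sites do.
  return llvm::dyn_cast<llvm::Function>(Callee->stripPointerCasts());
}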
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp --- a/llvm/lib/Analysis/Lint.cpp +++ b/llvm/lib/Analysis/Lint.cpp @@ -220,7 +220,7 @@ } void Lint::visitCallBase(CallBase &I) { - Value *Callee = I.getCalledValue(); + Value *Callee = I.getCalledOperand(); visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr, MemRef::Callee); diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp --- a/llvm/lib/Analysis/MemorySSA.cpp +++ b/llvm/lib/Analysis/MemorySSA.cpp @@ -167,7 +167,7 @@ if (!IsCall) return Loc == Other.Loc; - if (Call->getCalledValue() != Other.Call->getCalledValue()) + if (Call->getCalledOperand() != Other.Call->getCalledOperand()) return false; return Call->arg_size() == Other.Call->arg_size() && @@ -203,7 +203,7 @@ hash_code hash = hash_combine(MLOC.IsCall, DenseMapInfo::getHashValue( - MLOC.getCall()->getCalledValue())); + MLOC.getCall()->getCalledOperand())); for (const Value *Arg : MLOC.getCall()->args()) hash = hash_combine(hash, DenseMapInfo::getHashValue(Arg)); diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp --- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp +++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp @@ -316,7 +316,7 @@ if (HasLocalsInUsedOrAsm && CI && CI->isInlineAsm()) HasInlineAsmMaybeReferencingInternal = true; - auto *CalledValue = CB->getCalledValue(); + auto *CalledValue = CB->getCalledOperand(); auto *CalledFunction = CB->getCalledFunction(); if (CalledValue && !CalledFunction) { CalledValue = CalledValue->stripPointerCasts(); diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp --- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp +++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp @@ -353,7 +353,7 @@ // Do not follow aliases, otherwise we could inadvertently follow // dso_preemptable aliases or aliases with interposable linkage. const GlobalValue *Callee = - dyn_cast(CB.getCalledValue()->stripPointerCasts()); + dyn_cast(CB.getCalledOperand()->stripPointerCasts()); if (!Callee) { US.updateRange(UnknownRange); return false; diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -2775,7 +2775,7 @@ case Instruction::Invoke: { const InvokeInst *II = cast(&I); - const Value *Callee = II->getCalledValue(); + const Value *Callee = II->getCalledOperand(); FunctionType *FTy = II->getFunctionType(); if (II->hasOperandBundles()) @@ -2851,7 +2851,7 @@ } case Instruction::CallBr: { const CallBrInst *CBI = cast(&I); - const Value *Callee = CBI->getCalledValue(); + const Value *Callee = CBI->getCalledOperand(); FunctionType *FTy = CBI->getFunctionType(); if (CBI->hasOperandBundles()) @@ -3029,7 +3029,7 @@ Vals.push_back(Flags); Vals.push_back(VE.getTypeID(FTy)); - pushValueAndType(CI.getCalledValue(), InstID, Vals); // Callee + pushValueAndType(CI.getCalledOperand(), InstID, Vals); // Callee // Emit value #'s for the fixed parameters. for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -1889,7 +1889,7 @@ // Lower inline assembly if we can. // If we found an inline asm expession, and if the target knows how to // lower it to normal LLVM code, do so now. 
- if (isa(CI->getCalledValue())) { + if (CI->isInlineAsm()) { if (TLI->ExpandInlineAsm(CI)) { // Avoid invalidating the iterator. CurInstIterator = BB->begin(); @@ -4636,7 +4636,7 @@ continue; } - InlineAsm *IA = dyn_cast(CI->getCalledValue()); + InlineAsm *IA = dyn_cast(CI->getCalledOperand()); if (!IA) return true; // If this is a memory operand, we're cool, otherwise bail out. diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp --- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp @@ -52,7 +52,7 @@ // Try looking through a bitcast from one function type to another. // Commonly happens with calls to objc_msgSend(). - const Value *CalleeV = CB.getCalledValue()->stripPointerCasts(); + const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts(); if (const Function *F = dyn_cast(CalleeV)) Info.Callee = MachineOperand::CreateGA(F, 0); else diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1607,7 +1607,7 @@ // scan is done to check if any instructions are calls. bool Success = CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, - [&]() { return getOrCreateVReg(*CB.getCalledValue()); }); + [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); // Check if we just inserted a tail call. if (Success) { @@ -1712,9 +1712,8 @@ const BasicBlock *ReturnBB = I.getSuccessor(0); const BasicBlock *EHPadBB = I.getSuccessor(1); - const Value *Callee = I.getCalledValue(); - const Function *Fn = dyn_cast(Callee); - if (isa(Callee)) + const Function *Fn = I.getCalledFunction(); + if (I.isInlineAsm()) return false; // FIXME: support invoking patchpoint and statepoint intrinsics. diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp --- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp @@ -32,7 +32,7 @@ bool InlineAsmLowering::lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &Call) const { - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); StringRef ConstraintStr = IA->getConstraintString(); bool HasOnlyMemoryClobber = false; diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp --- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp +++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp @@ -39,7 +39,7 @@ for (auto I = F.use_begin(), E = F.use_end(); I != E;) { auto CI = dyn_cast(I->getUser()); ++I; - if (!CI || CI->getCalledValue() != &F) + if (!CI || CI->getCalledOperand() != &F) continue; IRBuilder<> B(CI); diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp --- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -1290,7 +1290,7 @@ IsTailCall = false; CallLoweringInfo CLI; - CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), *CI) + CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI) .setTailCall(IsTailCall); return lowerCallTo(CLI); @@ -1300,7 +1300,7 @@ const CallInst *Call = cast(I); // Handle simple inline asms. 
- if (const InlineAsm *IA = dyn_cast(Call->getCalledValue())) { + if (const InlineAsm *IA = dyn_cast(Call->getCalledOperand())) { // If the inline asm has side effects, then make sure that no local value // lives across by flushing the local value map. if (IA->hasSideEffects()) diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp --- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -183,7 +183,7 @@ // Look for inline asm that clobbers the SP register. if (auto *Call = dyn_cast(&I)) { - if (isa(Call->getCalledValue())) { + if (Call->isInlineAsm()) { unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); std::vector Ops = diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -346,7 +346,7 @@ const char *AsmError = ", possible invalid constraint for vector type"; if (const CallInst *CI = dyn_cast(I)) - if (isa(CI->getCalledValue())) + if (isa(CI->getCalledOperand())) return Ctx.emitError(I, ErrMsg + AsmError); return Ctx.emitError(I, ErrMsg); @@ -2776,7 +2776,7 @@ LLVMContext::OB_cfguardtarget}) && "Cannot lower invokes with arbitrary operand bundles yet!"); - const Value *Callee(I.getCalledValue()); + const Value *Callee(I.getCalledOperand()); const Function *Fn = dyn_cast(Callee); if (isa(Callee)) visitInlineAsm(I); @@ -2856,7 +2856,7 @@ {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && "Cannot lower callbrs with arbitrary operand bundles yet!"); - assert(isa(I.getCalledValue()) && + assert(isa(I.getCalledOperand()) && "Only know how to handle inlineasm callbr"); visitInlineAsm(I); CopyToExportRegsIfNeeded(&I); @@ -7476,7 +7476,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) { // Handle inline assembly differently. - if (isa(I.getCalledValue())) { + if (I.isInlineAsm()) { visitInlineAsm(I); return; } @@ -7648,7 +7648,7 @@ LLVMContext::OB_cfguardtarget}) && "Cannot lower calls with arbitrary operand bundles!"); - SDValue Callee = getValue(I.getCalledValue()); + SDValue Callee = getValue(I.getCalledOperand()); if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) LowerCallSiteWithDeoptBundle(&I, Callee, nullptr); @@ -7949,7 +7949,7 @@ public: explicit ExtraFlags(const CallBase &Call) { - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); if (IA->hasSideEffects()) Flags |= InlineAsm::Extra_HasSideEffects; if (IA->isAlignStack()) @@ -7982,7 +7982,7 @@ /// visitInlineAsm - Handle a call to an InlineAsm object. void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) { - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); /// ConstraintOperands - Information about all of the constraints. 
SDISelAsmOperandInfoVector ConstraintOperands; @@ -8656,7 +8656,7 @@ SmallVector Ops; SDLoc DL = getCurSDLoc(); - Callee = getValue(CI.getCalledValue()); + Callee = getValue(CI.getCalledOperand()); NullPtr = DAG.getIntPtrConstant(0, DL, true); // The stackmap intrinsic only records the live variables (the arguments diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -4321,7 +4321,7 @@ const CallBase &Call) const { /// Information about all of the constraints. AsmOperandInfoVector ConstraintOperands; - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); unsigned maCount = 0; // Largest number of multiple alternative constraints. // Do a prepass over the constraints, canonicalizing them, and building up the diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp --- a/llvm/lib/CodeGen/WasmEHPrepare.cpp +++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp @@ -358,9 +358,9 @@ Instruction *GetExnCI = nullptr, *GetSelectorCI = nullptr; for (auto &U : FPI->uses()) { if (auto *CI = dyn_cast(U.getUser())) { - if (CI->getCalledValue() == GetExnF) + if (CI->getCalledOperand() == GetExnF) GetExnCI = CI; - if (CI->getCalledValue() == GetSelectorF) + if (CI->getCalledOperand() == GetSelectorF) GetSelectorCI = CI; } } diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp --- a/llvm/lib/CodeGen/WinEHPrepare.cpp +++ b/llvm/lib/CodeGen/WinEHPrepare.cpp @@ -955,7 +955,7 @@ // Skip call sites which are nounwind intrinsics or inline asm. auto *CalledFn = - dyn_cast(CB->getCalledValue()->stripPointerCasts()); + dyn_cast(CB->getCalledOperand()->stripPointerCasts()); if (CalledFn && ((CalledFn->isIntrinsic() && CB->doesNotThrow()) || CB->isInlineAsm())) continue; diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp --- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp +++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp @@ -1167,7 +1167,7 @@ // To handle indirect calls, we must get the pointer value from the argument // and treat it as a function pointer. 
- GenericValue SRC = getOperandValue(SF.Caller->getCalledValue(), SF); + GenericValue SRC = getOperandValue(SF.Caller->getCalledOperand(), SF); callFunction((Function*)GVTOP(SRC), ArgVals); } diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -3905,7 +3905,7 @@ PrintCallingConv(CI->getCallingConv(), Out); } - Operand = CI->getCalledValue(); + Operand = CI->getCalledOperand(); FunctionType *FTy = CI->getFunctionType(); Type *RetTy = FTy->getReturnType(); const AttributeList &PAL = CI->getAttributes(); @@ -3944,7 +3944,7 @@ writeOperandBundles(CI); } else if (const InvokeInst *II = dyn_cast(&I)) { - Operand = II->getCalledValue(); + Operand = II->getCalledOperand(); FunctionType *FTy = II->getFunctionType(); Type *RetTy = FTy->getReturnType(); const AttributeList &PAL = II->getAttributes(); @@ -3987,7 +3987,7 @@ Out << " unwind "; writeOperand(II->getUnwindDest(), true); } else if (const CallBrInst *CBI = dyn_cast(&I)) { - Operand = CBI->getCalledValue(); + Operand = CBI->getCalledOperand(); FunctionType *FTy = CBI->getFunctionType(); Type *RetTy = FTy->getReturnType(); const AttributeList &PAL = CBI->getAttributes(); diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -2840,7 +2840,7 @@ } LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr) { - return wrap(unwrap(Instr)->getCalledValue()); + return wrap(unwrap(Instr)->getCalledOperand()); } LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef Instr) { diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -255,7 +255,7 @@ } bool CallBase::isIndirectCall() const { - const Value *V = getCalledValue(); + const Value *V = getCalledOperand(); if (isa(V) || isa(V)) return false; return !isInlineAsm(); @@ -491,7 +491,7 @@ Instruction *InsertPt) { std::vector Args(CI->arg_begin(), CI->arg_end()); - auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(), + auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Args, OpB, CI->getName(), InsertPt); NewCI->setTailCallKind(CI->getTailCallKind()); NewCI->setCallingConv(CI->getCallingConv()); @@ -802,9 +802,9 @@ Instruction *InsertPt) { std::vector Args(II->arg_begin(), II->arg_end()); - auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(), - II->getNormalDest(), II->getUnwindDest(), - Args, OpB, II->getName(), InsertPt); + auto *NewII = InvokeInst::Create( + II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(), + II->getUnwindDest(), Args, OpB, II->getName(), InsertPt); NewII->setCallingConv(II->getCallingConv()); NewII->SubclassOptionalData = II->SubclassOptionalData; NewII->setAttributes(II->getAttributes()); @@ -885,11 +885,9 @@ Instruction *InsertPt) { std::vector Args(CBI->arg_begin(), CBI->arg_end()); - auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(), - CBI->getCalledValue(), - CBI->getDefaultDest(), - CBI->getIndirectDests(), - Args, OpB, CBI->getName(), InsertPt); + auto *NewCBI = CallBrInst::Create( + CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(), + CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt); NewCBI->setCallingConv(CBI->getCallingConv()); NewCBI->SubclassOptionalData = CBI->SubclassOptionalData; NewCBI->setAttributes(CBI->getAttributes()); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp --- a/llvm/lib/IR/Verifier.cpp 
+++ b/llvm/lib/IR/Verifier.cpp @@ -2872,9 +2872,9 @@ } void Verifier::visitCallBase(CallBase &Call) { - Assert(Call.getCalledValue()->getType()->isPointerTy(), + Assert(Call.getCalledOperand()->getType()->isPointerTy(), "Called function must be a pointer!", Call); - PointerType *FPTy = cast(Call.getCalledValue()->getType()); + PointerType *FPTy = cast(Call.getCalledOperand()->getType()); Assert(FPTy->getElementType()->isFunctionTy(), "Called function is not pointer to function type!", Call); @@ -2907,8 +2907,8 @@ bool IsIntrinsic = Call.getCalledFunction() && Call.getCalledFunction()->getName().startswith("llvm."); - Function *Callee - = dyn_cast(Call.getCalledValue()->stripPointerCasts()); + Function *Callee = + dyn_cast(Call.getCalledOperand()->stripPointerCasts()); if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) { // Don't allow speculatable on call sites, unless the underlying function diff --git a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp --- a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp +++ b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp @@ -304,7 +304,7 @@ // Do not mess with inline asm. const CallInst *CI = dyn_cast(Instr); - return !(CI && isa(CI->getCalledValue())); + return !(CI && CI->isInlineAsm()); } /// Check if the given Cst should be converted into diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp @@ -288,7 +288,7 @@ for (Instruction &I : BB) { if (auto *CB = dyn_cast(&I)) { const Function *Callee = - dyn_cast(CB->getCalledValue()->stripPointerCasts()); + dyn_cast(CB->getCalledOperand()->stripPointerCasts()); // TODO: Do something with indirect calls. if (!Callee) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp b/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp @@ -34,7 +34,8 @@ void visitCallBase(CallBase &CB) { if (CB.getCalledFunction()) return; - auto *Callee = dyn_cast(CB.getCalledValue()->stripPointerCasts()); + auto *Callee = + dyn_cast(CB.getCalledOperand()->stripPointerCasts()); if (Callee && isLegalToPromote(CB, Callee)) { promoteCall(CB, Callee); Modified = true; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -782,7 +782,7 @@ // Assume all function calls are a source of divergence. if (const CallInst *CI = dyn_cast(V)) { - if (isa(CI->getCalledValue())) + if (CI->isInlineAsm()) return isInlineAsmSourceOfDivergence(CI); return true; } @@ -810,7 +810,7 @@ } if (const CallInst *CI = dyn_cast(V)) { - if (isa(CI->getCalledValue())) + if (CI->isInlineAsm()) return !isInlineAsmSourceOfDivergence(CI); return false; } @@ -838,7 +838,7 @@ // If we have inline asm returning mixed SGPR and VGPR results, we inferred // divergent for the overall struct return. We need to override it in the // case we're extracting an SGPR component here. 
- if (isa(CI->getCalledValue())) + if (CI->isInlineAsm()) return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices()); return false; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -11009,7 +11009,7 @@ bool SITargetLowering::requiresUniformRegister(MachineFunction &MF, const Value *V) const { if (const CallInst *CI = dyn_cast(V)) { - if (isa(CI->getCalledValue())) { + if (CI->isInlineAsm()) { // FIXME: This cannot give a correct answer. This should only trigger in // the case where inline asm returns mixed SGPR and VGPR results, used // outside the defining block. We don't have a specific result to diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp --- a/llvm/lib/Target/ARM/ARMFastISel.cpp +++ b/llvm/lib/Target/ARM/ARMFastISel.cpp @@ -2288,7 +2288,7 @@ bool ARMFastISel::SelectCall(const Instruction *I, const char *IntrMemName = nullptr) { const CallInst *CI = cast(I); - const Value *Callee = CI->getCalledValue(); + const Value *Callee = CI->getCalledOperand(); // Can't handle inline asm. if (isa(Callee)) return false; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -16518,7 +16518,7 @@ if (!Subtarget->hasV6Ops()) return false; - InlineAsm *IA = cast(CI->getCalledValue()); + InlineAsm *IA = cast(CI->getCalledOperand()); std::string AsmStr = IA->getAsmString(); SmallVector AsmPieces; SplitString(AsmStr, AsmPieces, ";\n"); diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp --- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp +++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp @@ -239,7 +239,7 @@ if (!Call) return false; - const auto *GV = dyn_cast(Call->getCalledValue()); + const auto *GV = dyn_cast(Call->getCalledOperand()); if (!GV) return false; if (GV->getName().startswith("llvm.preserve.array.access.index")) { diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -1361,19 +1361,19 @@ } unsigned Alignment = 0; - const Value *DirectCallee = CB->getCalledFunction(); + const Function *DirectCallee = CB->getCalledFunction(); if (!DirectCallee) { // We don't have a direct function symbol, but that may be because of // constant cast instructions in the call. // With bitcast'd call targets, the instruction will be the call - if (isa(CB)) { + if (const auto *CI = dyn_cast(CB)) { // Check if we have call alignment metadata - if (getAlign(*cast(CB), Idx, Alignment)) + if (getAlign(*CI, Idx, Alignment)) return Align(Alignment); - const Value *CalleeV = cast(CB)->getCalledValue(); + const Value *CalleeV = CI->getCalledOperand(); // Ignore any bitcast instructions while (isa(CalleeV)) { const ConstantExpr *CE = cast(CalleeV); @@ -1385,15 +1385,15 @@ // We have now looked past all of the bitcasts. Do we finally have a // Function? 
- if (isa(CalleeV)) - DirectCallee = CalleeV; + if (const auto *CalleeF = dyn_cast(CalleeV)) + DirectCallee = CalleeF; } } // Check for function alignment information if we found that the // ultimate target is a Function if (DirectCallee) - if (getAlign(*cast(DirectCallee), Idx, Alignment)) + if (getAlign(*DirectCallee, Idx, Alignment)) return Align(Alignment); // Call is indirect or alignment information is not available, fall back to diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -5368,7 +5368,7 @@ MachineMemOperand::MOInvariant) : MachineMemOperand::MONone; - MachinePointerInfo MPI(CB ? CB->getCalledValue() : nullptr); + MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr); // Registers used in building the DAG. const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister(); diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp --- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp @@ -271,7 +271,7 @@ J != JE; ++J) { if (CallInst *CI = dyn_cast(J)) { // Inline ASM is okay, unless it clobbers the ctr register. - if (InlineAsm *IA = dyn_cast(CI->getCalledValue())) { + if (InlineAsm *IA = dyn_cast(CI->getCalledOperand())) { if (asmClobbersCTR(IA)) return true; continue; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -761,7 +761,7 @@ return false; bool IsDirect = Func != nullptr; - if (!IsDirect && isa(Call->getCalledValue())) + if (!IsDirect && isa(Call->getCalledOperand())) return false; FunctionType *FuncTy = Call->getFunctionType(); @@ -847,7 +847,7 @@ unsigned CalleeReg = 0; if (!IsDirect) { - CalleeReg = getRegForValue(Call->getCalledValue()); + CalleeReg = getRegForValue(Call->getCalledOperand()); if (!CalleeReg) return false; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp @@ -76,7 +76,7 @@ if (!CB) // Skip uses that aren't immediately called continue; - Value *Callee = CB->getCalledValue(); + Value *Callee = CB->getCalledOperand(); if (Callee != V) // Skip calls where the function isn't the callee continue; @@ -307,7 +307,7 @@ if (CallMain) { Main->setName("__original_main"); auto *MainWrapper = - cast(CallMain->getCalledValue()->stripPointerCasts()); + cast(CallMain->getCalledOperand()->stripPointerCasts()); delete CallMain; if (Main->isDeclaration()) { // The wrapper is not needed in this case as we don't need to export diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp @@ -258,11 +258,11 @@ bool runSjLjOnFunction(Function &F); Function *getFindMatchingCatch(Module &M, unsigned NumClauses); - template Value *wrapInvoke(CallOrInvoke *CI); + Value *wrapInvoke(CallBase *CI); void wrapTestSetjmp(BasicBlock *BB, DebugLoc DL, Value *Threw, Value 
*SetjmpTable, Value *SetjmpTableSize, Value *&Label, Value *&LongjmpResult, BasicBlock *&EndBB); - template Function *getInvokeWrapper(CallOrInvoke *CI); + Function *getInvokeWrapper(CallBase *CI); bool areAllExceptionsAllowed() const { return EHWhitelistSet.empty(); } bool canLongjmp(Module &M, const Value *Callee) const; @@ -388,15 +388,14 @@ // %__THREW__.val = __THREW__; __THREW__ = 0; // Returns %__THREW__.val, which indicates whether an exception is thrown (or // whether longjmp occurred), for future use. -template -Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) { +Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) { LLVMContext &C = CI->getModule()->getContext(); // If we are calling a function that is noreturn, we must remove that // attribute. The code we insert here does expect it to return, after we // catch the exception. if (CI->doesNotReturn()) { - if (auto *F = dyn_cast(CI->getCalledValue())) + if (auto *F = CI->getCalledFunction()) F->removeFnAttr(Attribute::NoReturn); CI->removeAttribute(AttributeList::FunctionIndex, Attribute::NoReturn); } @@ -412,7 +411,7 @@ SmallVector Args; // Put the pointer to the callee as first argument, so it can be called // within the invoke wrapper later - Args.push_back(CI->getCalledValue()); + Args.push_back(CI->getCalledOperand()); Args.append(CI->arg_begin(), CI->arg_end()); CallInst *NewCall = IRB.CreateCall(getInvokeWrapper(CI), Args); NewCall->takeName(CI); @@ -460,18 +459,10 @@ } // Get matching invoke wrapper based on callee signature -template -Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallOrInvoke *CI) { +Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallBase *CI) { Module *M = CI->getModule(); SmallVector ArgTys; - Value *Callee = CI->getCalledValue(); - FunctionType *CalleeFTy; - if (auto *F = dyn_cast(Callee)) - CalleeFTy = F->getFunctionType(); - else { - auto *CalleeTy = cast(Callee->getType())->getElementType(); - CalleeFTy = cast(CalleeTy); - } + FunctionType *CalleeFTy = CI->getFunctionType(); std::string Sig = getSignature(CalleeFTy); if (InvokeWrappers.find(Sig) != InvokeWrappers.end()) @@ -764,7 +755,7 @@ LandingPads.insert(II->getLandingPadInst()); IRB.SetInsertPoint(II); - bool NeedInvoke = AllowExceptions && canThrow(II->getCalledValue()); + bool NeedInvoke = AllowExceptions && canThrow(II->getCalledOperand()); if (NeedInvoke) { // Wrap invoke with invoke wrapper and generate preamble/postamble Value *Threw = wrapInvoke(II); @@ -779,7 +770,7 @@ // call+branch SmallVector Args(II->arg_begin(), II->arg_end()); CallInst *NewCall = - IRB.CreateCall(II->getFunctionType(), II->getCalledValue(), Args); + IRB.CreateCall(II->getFunctionType(), II->getCalledOperand(), Args); NewCall->takeName(II); NewCall->setCallingConv(II->getCallingConv()); NewCall->setDebugLoc(II->getDebugLoc()); @@ -1005,7 +996,7 @@ if (!CI) continue; - const Value *Callee = CI->getCalledValue(); + const Value *Callee = CI->getCalledOperand(); if (!canLongjmp(M, Callee)) continue; if (isEmAsmCall(M, Callee)) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -47867,7 +47867,7 @@ } bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { - InlineAsm *IA = cast(CI->getCalledValue()); + InlineAsm *IA = cast(CI->getCalledOperand()); const std::string &AsmStr = IA->getAsmString(); diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp 
b/llvm/lib/Target/X86/X86WinEHState.cpp --- a/llvm/lib/Target/X86/X86WinEHState.cpp +++ b/llvm/lib/Target/X86/X86WinEHState.cpp @@ -754,7 +754,7 @@ auto *Call = dyn_cast(&I); if (!Call) continue; - if (Call->getCalledValue()->stripPointerCasts() != + if (Call->getCalledOperand()->stripPointerCasts() != SetJmp3.getCallee()->stripPointerCasts()) continue; diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp --- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp @@ -1167,7 +1167,7 @@ if (!CB) return false; - auto *Callee = CB->getCalledValue()->stripPointerCasts(); + auto *Callee = CB->getCalledOperand()->stripPointerCasts(); // See if the callsite is for resumption or destruction of the coroutine. auto *SubFn = dyn_cast(Callee); @@ -1197,7 +1197,7 @@ } // Grab the CalledValue from CB before erasing the CallInstr. - auto *CalledValue = CB->getCalledValue(); + auto *CalledValue = CB->getCalledOperand(); CB->eraseFromParent(); // If no more users remove it. Usually it is a bitcast of SubFn. diff --git a/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp b/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp --- a/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp +++ b/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp @@ -385,7 +385,7 @@ bool Changed = false; MDBuilder MDB(M.getContext()); for (CallBase *C : Lattice.getIndirectCalls()) { - auto RegI = CVPLatticeKey(C->getCalledValue(), IPOGrouping::Register); + auto RegI = CVPLatticeKey(C->getCalledOperand(), IPOGrouping::Register); CVPLatticeVal LV = Solver.getExistingValueState(RegI); if (!LV.isFunctionSet() || LV.getFunctions().empty()) continue; diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp --- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp +++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp @@ -658,12 +658,12 @@ return false; // Storing the value. } } else if (const CallInst *CI = dyn_cast(U)) { - if (CI->getCalledValue() != V) { + if (CI->getCalledOperand() != V) { //cerr << "NONTRAPPING USE: " << *U; return false; // Not calling the ptr } } else if (const InvokeInst *II = dyn_cast(U)) { - if (II->getCalledValue() != V) { + if (II->getCalledOperand() != V) { //cerr << "NONTRAPPING USE: " << *U; return false; // Not calling the ptr } @@ -721,7 +721,7 @@ } } else if (isa(I) || isa(I)) { CallBase *CB = cast(I); - if (CB->getCalledValue() == V) { + if (CB->getCalledOperand() == V) { // Calling through the pointer! Turn into a direct call, but be careful // that the pointer is not also being passed as an argument. 
CB->setCalledOperand(NewV); diff --git a/llvm/lib/Transforms/IPO/PruneEH.cpp b/llvm/lib/Transforms/IPO/PruneEH.cpp --- a/llvm/lib/Transforms/IPO/PruneEH.cpp +++ b/llvm/lib/Transforms/IPO/PruneEH.cpp @@ -136,7 +136,7 @@ } if (CheckReturnViaAsm && !SCCMightReturn) if (const auto *CB = dyn_cast(&I)) - if (const auto *IA = dyn_cast(CB->getCalledValue())) + if (const auto *IA = dyn_cast(CB->getCalledOperand())) if (IA->hasSideEffects()) SCCMightReturn = true; } diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp --- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp +++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp @@ -1028,7 +1028,7 @@ VCallSite.emitRemark("single-impl", TheFn->stripPointerCasts()->getName(), OREGetter); VCallSite.CB.setCalledOperand(ConstantExpr::getBitCast( - TheFn, VCallSite.CB.getCalledValue()->getType())); + TheFn, VCallSite.CB.getCalledOperand()->getType())); // This use is no longer unsafe. if (VCallSite.NumUnsafeUses) --*VCallSite.NumUnsafeUses; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -4163,7 +4163,7 @@ // Note: New assumption intrinsics created here are registered by // the InstCombineIRInserter object. FunctionType *AssumeIntrinsicTy = II->getFunctionType(); - Value *AssumeIntrinsic = II->getCalledValue(); + Value *AssumeIntrinsic = II->getCalledOperand(); Value *A, *B; if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) { Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName()); @@ -4541,7 +4541,7 @@ // If the callee is a pointer to a function, attempt to move any casts to the // arguments of the call/callbr/invoke. - Value *Callee = Call.getCalledValue(); + Value *Callee = Call.getCalledOperand(); if (!isa(Callee) && transformConstExprCastCall(Call)) return nullptr; @@ -4660,7 +4660,8 @@ /// If the callee is a constexpr cast of a function, attempt to move the cast to /// the arguments of the call/callbr/invoke. bool InstCombiner::transformConstExprCastCall(CallBase &Call) { - auto *Callee = dyn_cast(Call.getCalledValue()->stripPointerCasts()); + auto *Callee = + dyn_cast(Call.getCalledOperand()->stripPointerCasts()); if (!Callee) return false; @@ -4778,7 +4779,7 @@ // If the callee is just a declaration, don't change the varargsness of the // call. We don't want to introduce a varargs call where one doesn't // already exist. 
- PointerType *APTy = cast(Call.getCalledValue()->getType()); + PointerType *APTy = cast(Call.getCalledOperand()->getType()); if (FT->isVarArg()!=cast(APTy->getElementType())->isVarArg()) return false; @@ -4946,7 +4947,7 @@ Instruction * InstCombiner::transformCallThroughTrampoline(CallBase &Call, IntrinsicInst &Tramp) { - Value *Callee = Call.getCalledValue(); + Value *Callee = Call.getCalledOperand(); Type *CalleeTy = Callee->getType(); FunctionType *FTy = Call.getFunctionType(); AttributeList Attrs = Call.getAttributes(); diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -1378,7 +1378,7 @@ *Alignment = 0; PtrOperand = XCHG->getPointerOperand(); } else if (auto CI = dyn_cast(I)) { - auto *F = dyn_cast(CI->getCalledValue()); + auto *F = CI->getCalledFunction(); if (F && (F->getName().startswith("llvm.masked.load.") || F->getName().startswith("llvm.masked.store."))) { unsigned OpOffset = 0; diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -1553,7 +1553,7 @@ Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr); SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr); auto *MTI = cast( - IRB.CreateCall(I.getFunctionType(), I.getCalledValue(), + IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(), {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()})); if (ClPreserveAlignment) { MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes); @@ -1593,7 +1593,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) { Function *F = CB.getCalledFunction(); - if ((F && F->isIntrinsic()) || isa(CB.getCalledValue())) { + if ((F && F->isIntrinsic()) || CB.isInlineAsm()) { visitOperandShadowInst(CB); return; } @@ -1606,7 +1606,7 @@ IRBuilder<> IRB(&CB); DenseMap::iterator i = - DFSF.DFS.UnwrappedFnMap.find(CB.getCalledValue()); + DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand()); if (i != DFSF.DFS.UnwrappedFnMap.end()) { Function *F = i->second; switch (DFSF.DFS.getWrapperKind(F)) { @@ -1728,8 +1728,7 @@ } } - FunctionType *FT = cast( - CB.getCalledValue()->getType()->getPointerElementType()); + FunctionType *FT = CB.getFunctionType(); if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) { for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) { IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)), @@ -1766,7 +1765,7 @@ if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) { FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT); Value *Func = - IRB.CreateBitCast(CB.getCalledValue(), PointerType::getUnqual(NewFT)); + IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT)); std::vector Args; auto i = CB.arg_begin(), E = CB.arg_end(); diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -2743,7 +2743,7 @@ : Lower64ShadowExtend(IRB, S2, getShadowTy(&I)); Value *V1 = I.getOperand(0); Value *V2 = I.getOperand(1); - Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledValue(), + Value *Shift = IRB.CreateCall(I.getFunctionType(), 
I.getCalledOperand(), {IRB.CreateBitCast(S1, V1->getType()), V2}); Shift = IRB.CreateBitCast(Shift, getShadowTy(&I)); setShadow(&I, IRB.CreateOr(Shift, S2Conv)); @@ -3761,7 +3761,7 @@ const DataLayout &DL = F.getParent()->getDataLayout(); CallBase *CB = cast(&I); IRBuilder<> IRB(&I); - InlineAsm *IA = cast(CB->getCalledValue()); + InlineAsm *IA = cast(CB->getCalledOperand()); int OutputArgs = getNumOutputArgs(IA, CB); // The last operand of a CallInst is the function itself. int NumOperands = CB->getNumOperands() - 1; diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp --- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp +++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp @@ -786,7 +786,7 @@ for (auto I : IndirCalls) { IRBuilder<> IRB(I); CallBase &CB = cast(*I); - Value *Callee = CB.getCalledValue(); + Value *Callee = CB.getCalledOperand(); if (isa(Callee)) continue; IRB.CreateCall(SanCovTracePCIndir, IRB.CreatePointerCast(Callee, IntptrTy)); diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc --- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc +++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc @@ -59,7 +59,7 @@ void run(std::vector &Candidates) { std::vector Result = findIndirectCalls(F); for (Instruction *I : Result) { - Value *Callee = cast(I)->getCalledValue(); + Value *Callee = cast(I)->getCalledOperand(); Instruction *InsertPt = I; Instruction *AnnotatedInst = I; Candidates.emplace_back(CandidateInfo{Callee, InsertPt, AnnotatedInst}); diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp --- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp +++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp @@ -381,7 +381,7 @@ dbgs() << " " << V->getName() << " " << *V << "\n"; } if (PrintLiveSetSize) { - dbgs() << "Safepoint For: " << Call->getCalledValue()->getName() << "\n"; + dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n"; dbgs() << "Number live values: " << LiveSet.size() << "\n"; } Result.LiveSet = LiveSet; @@ -1481,7 +1481,7 @@ assert(DeoptLowering.equals("live-through") && "Unsupported value!"); } - Value *CallTarget = Call->getCalledValue(); + Value *CallTarget = Call->getCalledOperand(); if (Function *F = dyn_cast(CallTarget)) { if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize) { // Calls to llvm.experimental.deoptimize are lowered to calls to the diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp --- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp +++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp @@ -264,9 +264,9 @@ // Create the compare. The called value and callee must have the same type to // be compared. - if (CB.getCalledValue()->getType() != Callee->getType()) - Callee = Builder.CreateBitCast(Callee, CB.getCalledValue()->getType()); - auto *Cond = Builder.CreateICmpEQ(CB.getCalledValue(), Callee); + if (CB.getCalledOperand()->getType() != Callee->getType()) + Callee = Builder.CreateBitCast(Callee, CB.getCalledOperand()->getType()); + auto *Cond = Builder.CreateICmpEQ(CB.getCalledOperand(), Callee); // Create an if-then-else structure. 
The original instruction is moved into // the "else" block, and a clone of the original instruction is placed in the @@ -462,7 +462,7 @@ assert(!CB.getCalledFunction()); Module *M = CB.getCaller()->getParent(); const DataLayout &DL = M->getDataLayout(); - Value *Callee = CB.getCalledValue(); + Value *Callee = CB.getCalledOperand(); LoadInst *VTableEntryLoad = dyn_cast(Callee); if (!VTableEntryLoad) diff --git a/llvm/lib/Transforms/Utils/Evaluator.cpp b/llvm/lib/Transforms/Utils/Evaluator.cpp --- a/llvm/lib/Transforms/Utils/Evaluator.cpp +++ b/llvm/lib/Transforms/Utils/Evaluator.cpp @@ -266,7 +266,7 @@ Function * Evaluator::getCalleeWithFormalArgs(CallBase &CB, SmallVectorImpl &Formals) { - auto *V = CB.getCalledValue(); + auto *V = CB.getCalledOperand(); if (auto *Fn = getFunction(getVal(V))) return getFormalParams(CB, Fn, Formals) ? Fn : nullptr; @@ -486,7 +486,7 @@ } // Cannot handle inline asm. - if (isa(CB.getCalledValue())) { + if (CB.isInlineAsm()) { LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); return false; } @@ -568,7 +568,7 @@ if (Callee->isDeclaration()) { // If this is a function we can constant fold, do it. if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) { - InstResult = castCallResultIfNeeded(CB.getCalledValue(), C); + InstResult = castCallResultIfNeeded(CB.getCalledOperand(), C); if (!InstResult) return false; LLVM_DEBUG(dbgs() << "Constant folded function call. Result: " @@ -591,7 +591,7 @@ return false; } ValueStack.pop_back(); - InstResult = castCallResultIfNeeded(CB.getCalledValue(), RetVal); + InstResult = castCallResultIfNeeded(CB.getCalledOperand(), RetVal); if (RetVal && !InstResult) return false; diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -534,7 +534,7 @@ // instructions require no special handling. CallInst *CI = dyn_cast(I); - if (!CI || CI->doesNotThrow() || isa(CI->getCalledValue())) + if (!CI || CI->doesNotThrow() || CI->isInlineAsm()) continue; // We do not need to (and in fact, cannot) convert possibly throwing calls @@ -2149,7 +2149,7 @@ // Skip call sites which are nounwind intrinsics. auto *CalledFn = - dyn_cast(I->getCalledValue()->stripPointerCasts()); + dyn_cast(I->getCalledOperand()->stripPointerCasts()); if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow()) continue; diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -1963,7 +1963,7 @@ SmallVector OpBundles; II->getOperandBundlesAsDefs(OpBundles); CallInst *NewCall = CallInst::Create(II->getFunctionType(), - II->getCalledValue(), Args, OpBundles); + II->getCalledOperand(), Args, OpBundles); NewCall->setCallingConv(II->getCallingConv()); NewCall->setAttributes(II->getAttributes()); NewCall->setDebugLoc(II->getDebugLoc()); @@ -2014,7 +2014,7 @@ // as of this time. InvokeInst *II = - InvokeInst::Create(CI->getFunctionType(), CI->getCalledValue(), Split, + InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split, UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB); II->setDebugLoc(CI->getDebugLoc()); II->setCallingConv(CI->getCallingConv()); @@ -2045,7 +2045,7 @@ // canonicalizes unreachable insts into stores to null or undef. 
for (Instruction &I : *BB) { if (auto *CI = dyn_cast(&I)) { - Value *Callee = CI->getCalledValue(); + Value *Callee = CI->getCalledOperand(); // Handle intrinsic calls. if (Function *F = dyn_cast(Callee)) { auto IntrinsicID = F->getIntrinsicID(); @@ -2120,7 +2120,7 @@ Instruction *Terminator = BB->getTerminator(); if (auto *II = dyn_cast(Terminator)) { // Turn invokes that call 'nounwind' functions into ordinary calls. - Value *Callee = II->getCalledValue(); + Value *Callee = II->getCalledOperand(); if ((isa(Callee) && !NullPointerIsDefined(BB->getParent())) || isa(Callee)) { diff --git a/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/llvm/lib/Transforms/Utils/LowerInvoke.cpp --- a/llvm/lib/Transforms/Utils/LowerInvoke.cpp +++ b/llvm/lib/Transforms/Utils/LowerInvoke.cpp @@ -53,7 +53,7 @@ II->getOperandBundlesAsDefs(OpBundles); // Insert a normal call instruction... CallInst *NewCall = - CallInst::Create(II->getFunctionType(), II->getCalledValue(), + CallInst::Create(II->getFunctionType(), II->getCalledOperand(), CallArgs, OpBundles, "", II); NewCall->takeName(II); NewCall->setCallingConv(II->getCallingConv()); diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -6103,7 +6103,7 @@ // A call to null is undefined. if (auto *CB = dyn_cast(Use)) return !NullPointerIsDefined(CB->getFunction()) && - CB->getCalledValue() == I; + CB->getCalledOperand() == I; } return false; } diff --git a/llvm/test/LTO/X86/type-mapping-bug3.ll b/llvm/test/LTO/X86/type-mapping-bug3.ll --- a/llvm/test/LTO/X86/type-mapping-bug3.ll +++ b/llvm/test/LTO/X86/type-mapping-bug3.ll @@ -25,7 +25,7 @@ entry: %f.addr = alloca %"T3"*load %"T3"*, %"T3"** %f.addr - ; The call with the getCalledValue() vs getCalledFunction() mismatch. + ; The call with the getCalledOperand() vs getCalledFunction() mismatch. 
call void @d(%"T3"* %0) unreachable } diff --git a/llvm/tools/llvm-diff/DiffConsumer.cpp b/llvm/tools/llvm-diff/DiffConsumer.cpp --- a/llvm/tools/llvm-diff/DiffConsumer.cpp +++ b/llvm/tools/llvm-diff/DiffConsumer.cpp @@ -50,15 +50,15 @@ return; } if (V->getType()->isVoidTy()) { - if (isa(V)) { + if (auto *SI = dyn_cast(V)) { out << "store to "; - printValue(cast(V)->getPointerOperand(), isL); - } else if (isa(V)) { + printValue(SI->getPointerOperand(), isL); + } else if (auto *CI = dyn_cast(V)) { out << "call to "; - printValue(cast(V)->getCalledValue(), isL); - } else if (isa(V)) { + printValue(CI->getCalledOperand(), isL); + } else if (auto *II = dyn_cast(V)) { out << "invoke to "; - printValue(cast(V)->getCalledValue(), isL); + printValue(II->getCalledOperand(), isL); } else { out << *V; } diff --git a/llvm/tools/llvm-diff/DifferenceEngine.cpp b/llvm/tools/llvm-diff/DifferenceEngine.cpp --- a/llvm/tools/llvm-diff/DifferenceEngine.cpp +++ b/llvm/tools/llvm-diff/DifferenceEngine.cpp @@ -224,7 +224,7 @@ bool diffCallSites(CallBase &L, CallBase &R, bool Complain) { // FIXME: call attributes - if (!equivalentAsOperands(L.getCalledValue(), R.getCalledValue())) { + if (!equivalentAsOperands(L.getCalledOperand(), R.getCalledOperand())) { if (Complain) Engine.log("called functions differ"); return true; } @@ -638,7 +638,8 @@ if (!isa(*I)) return; CallInst *LCall = cast(&*I); InvokeInst *RInvoke = cast(RTerm); - if (!equivalentAsOperands(LCall->getCalledValue(), RInvoke->getCalledValue())) + if (!equivalentAsOperands(LCall->getCalledOperand(), + RInvoke->getCalledOperand())) return; if (!LCall->use_empty()) Values[LCall] = RInvoke; @@ -651,7 +652,8 @@ if (!isa(*I)) return; CallInst *RCall = cast(I); InvokeInst *LInvoke = cast(LTerm); - if (!equivalentAsOperands(LInvoke->getCalledValue(), RCall->getCalledValue())) + if (!equivalentAsOperands(LInvoke->getCalledOperand(), + RCall->getCalledOperand())) return; if (!LInvoke->use_empty()) Values[LInvoke] = RCall; diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp --- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp +++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp @@ -720,7 +720,7 @@ op = b.create(loc, tys, b.getSymbolRefAttr(callee->getName()), ops); } else { - Value calledValue = processValue(ci->getCalledValue()); + Value calledValue = processValue(ci->getCalledOperand()); if (!calledValue) return failure(); ops.insert(ops.begin(), calledValue); @@ -766,7 +766,7 @@ ops, blocks[ii->getNormalDest()], normalArgs, blocks[ii->getUnwindDest()], unwindArgs); } else { - ops.insert(ops.begin(), processValue(ii->getCalledValue())); + ops.insert(ops.begin(), processValue(ii->getCalledOperand())); op = b.create(loc, tys, ops, blocks[ii->getNormalDest()], normalArgs, blocks[ii->getUnwindDest()], unwindArgs);
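(Note, not part of the patch: the second recurring cleanup above replaces isa<InlineAsm>(CI->getCalledValue()) checks with CallBase::isInlineAsm(). A minimal C++ sketch under the same LLVM-10-era assumptions; isAsmWithSideEffects is a hypothetical name used only for illustration.)

#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/Casting.h"

// Hypothetical helper: detect inline asm with side effects via the new API.
static bool isAsmWithSideEffects(const llvm::CallBase &CB) {
  // Old spelling: isa<InlineAsm>(CB.getCalledValue())
  if (!CB.isInlineAsm())
    return false;
  const auto *IA = llvm::cast<llvm::InlineAsm>(CB.getCalledOperand());
  return IA->hasSideEffects();
}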