Index: include/llvm/IR/InstrTypes.h =================================================================== --- include/llvm/IR/InstrTypes.h +++ include/llvm/IR/InstrTypes.h @@ -1095,6 +1095,19 @@ return isDataOperand(&UI.getUse()); } + /// Given a value use iterator, return the data operand corresponding to it. + /// Iterator must actually correspond to a data operand. + unsigned getDataOperandNo(Value::const_user_iterator UI) const { + return getDataOperandNo(&UI.getUse()); + } + + /// Given a use for a data operand, get the data operand number that + /// corresponds to it. + unsigned getDataOperandNo(const Use *U) const { + assert(isDataOperand(U) && "Data operand # out of range!"); + return U - data_operands_begin(); + } + /// Return the iterator pointing to the beginning of the argument list. User::op_iterator arg_begin() { return op_begin(); } User::const_op_iterator arg_begin() const { Index: lib/Transforms/InstCombine/InstCombineCalls.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCalls.cpp +++ lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -27,7 +27,6 @@ #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" @@ -1788,7 +1787,7 @@ } /// CallInst simplification. This mostly only handles folding of intrinsic -/// instructions. For normal calls, it allows visitCallSite to do the heavy +/// instructions. For normal calls, it allows visitCallBase to do the heavy /// lifting. Instruction *InstCombiner::visitCallInst(CallInst &CI) { if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI))) @@ -1805,10 +1804,10 @@ } IntrinsicInst *II = dyn_cast(&CI); - if (!II) return visitCallSite(&CI); + if (!II) return visitCallBase(CI); // Intrinsics cannot occur in an invoke, so handle them here instead of in - // visitCallSite. 
+ // visitCallBase. if (auto *MI = dyn_cast(II)) { bool Changed = false; @@ -3962,7 +3961,7 @@ break; } } - return visitCallSite(II); + return visitCallBase(*II); } // Fence instruction simplification @@ -3977,12 +3976,12 @@ // InvokeInst simplification Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) { - return visitCallSite(&II); + return visitCallBase(II); } /// If this cast does not affect the value passed through the varargs area, we /// can eliminate the use of the cast. -static bool isSafeToEliminateVarargsCast(const CallSite CS, +static bool isSafeToEliminateVarargsCast(const CallBase &CB, const DataLayout &DL, const CastInst *const CI, const int ix) { @@ -3994,13 +3993,13 @@ // TODO: This is probably something which should be expanded to all // intrinsics since the entire point of intrinsics is that // they are understandable by the optimizer. - if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) + if (isStatepoint(&CB) || isGCRelocate(&CB) || isGCResult(&CB)) return false; // The size of ByVal or InAlloca arguments is derived from the type, so we // can't change to a type with a different size. If the size were // passed explicitly we could avoid this check. - if (!CS.isByValOrInAllocaArgument(ix)) + if (!CB.isByValOrInAllocaArgument(ix)) return true; Type* SrcTy = @@ -4109,9 +4108,9 @@ } /// Improvements for call and invoke instructions. 
-Instruction *InstCombiner::visitCallSite(CallSite CS) { - if (isAllocLikeFn(CS.getInstruction(), &TLI)) - return visitAllocSite(*CS.getInstruction()); +Instruction *InstCombiner::visitCallBase(CallBase &CB) { + if (isAllocLikeFn(&CB, &TLI)) + return visitAllocSite(CB); bool Changed = false; @@ -4121,49 +4120,49 @@ SmallVector ArgNos; unsigned ArgNo = 0; - for (Value *V : CS.args()) { + for (Value *V : CB.args()) { if (V->getType()->isPointerTy() && - !CS.paramHasAttr(ArgNo, Attribute::NonNull) && - isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT)) + !CB.paramHasAttr(ArgNo, Attribute::NonNull) && + isKnownNonZero(V, DL, 0, &AC, &CB, &DT)) ArgNos.push_back(ArgNo); ArgNo++; } - assert(ArgNo == CS.arg_size() && "sanity check"); + assert(ArgNo == CB.arg_size() && "sanity check"); if (!ArgNos.empty()) { - AttributeList AS = CS.getAttributes(); - LLVMContext &Ctx = CS.getInstruction()->getContext(); + AttributeList AS = CB.getAttributes(); + LLVMContext &Ctx = CB.getContext(); AS = AS.addParamAttribute(Ctx, ArgNos, Attribute::get(Ctx, Attribute::NonNull)); - CS.setAttributes(AS); + CB.setAttributes(AS); Changed = true; } // If the callee is a pointer to a function, attempt to move any casts to the // arguments of the call/invoke. - Value *Callee = CS.getCalledValue(); - if (!isa(Callee) && transformConstExprCastCall(CS)) + Value *Callee = CB.getCalledValue(); + if (!isa(Callee) && transformConstExprCastCall(CB)) return nullptr; if (Function *CalleeF = dyn_cast(Callee)) { // Remove the convergent attr on calls when the callee is not convergent. 
- if (CS.isConvergent() && !CalleeF->isConvergent() && + if (CB.isConvergent() && !CalleeF->isConvergent() && !CalleeF->isIntrinsic()) { LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " - << CS.getInstruction() << "\n"); - CS.setNotConvergent(); - return CS.getInstruction(); + << CB << "\n"); + CB.setNotConvergent(); + return &CB; } // If the call and callee calling conventions don't match, this call must // be unreachable, as the call is undefined. - if (CalleeF->getCallingConv() != CS.getCallingConv() && + if (CalleeF->getCallingConv() != CB.getCallingConv() && // Only do this for calls to a function with a body. A prototype may // not actually end up matching the implementation's calling conv for a // variety of reasons (e.g. it may be written in assembly). !CalleeF->isDeclaration()) { - Instruction *OldCall = CS.getInstruction(); + Instruction *OldCall = &CB; new StoreInst(ConstantInt::getTrue(Callee->getContext()), UndefValue::get(Type::getInt1PtrTy(Callee->getContext())), OldCall); @@ -4183,15 +4182,15 @@ } if ((isa(Callee) && - !NullPointerIsDefined(CS.getInstruction()->getFunction())) || + !NullPointerIsDefined(CB.getFunction())) || isa(Callee)) { - // If CS does not return void then replaceAllUsesWith undef. + // If CB does not return void then replaceAllUsesWith undef. // This allows ValueHandlers and custom metadata to adjust itself. - if (!CS.getInstruction()->getType()->isVoidTy()) - replaceInstUsesWith(*CS.getInstruction(), - UndefValue::get(CS.getInstruction()->getType())); + if (!CB.getType()->isVoidTy()) + replaceInstUsesWith(CB, + UndefValue::get(CB.getType())); - if (isa(CS.getInstruction())) { + if (isa(CB)) { // Can't remove an invoke because we cannot change the CFG. return nullptr; } @@ -4201,13 +4200,13 @@ // that we can't modify the CFG here. 
new StoreInst(ConstantInt::getTrue(Callee->getContext()), UndefValue::get(Type::getInt1PtrTy(Callee->getContext())), - CS.getInstruction()); + &CB); - return eraseInstFromFunction(*CS.getInstruction()); + return eraseInstFromFunction(CB); } if (IntrinsicInst *II = findInitTrampoline(Callee)) - return transformCallThroughTrampoline(CS, II); + return transformCallThroughTrampoline(CB, *II); PointerType *PTy = cast(Callee->getType()); FunctionType *FTy = cast(PTy->getElementType()); @@ -4215,39 +4214,39 @@ int ix = FTy->getNumParams(); // See if we can optimize any arguments passed through the varargs area of // the call. - for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(), - E = CS.arg_end(); I != E; ++I, ++ix) { + for (auto I = CB.arg_begin() + FTy->getNumParams(), + E = CB.arg_end(); I != E; ++I, ++ix) { CastInst *CI = dyn_cast(*I); - if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) { + if (CI && isSafeToEliminateVarargsCast(CB, DL, CI, ix)) { *I = CI->getOperand(0); Changed = true; } } } - if (isa(Callee) && !CS.doesNotThrow()) { + if (isa(Callee) && !CB.doesNotThrow()) { // Inline asm calls cannot throw - mark them 'nounwind'. - CS.setDoesNotThrow(); + CB.setDoesNotThrow(); Changed = true; } // Try to optimize the call if possible, we require DataLayout for most of // this. None of these calls are seen as possibly dead so go ahead and // delete the instruction now. - if (CallInst *CI = dyn_cast(CS.getInstruction())) { + if (CallInst *CI = dyn_cast(&CB)) { Instruction *I = tryOptimizeCall(CI); // If we changed something return the result, etc. Otherwise let // the fallthrough check. if (I) return eraseInstFromFunction(*I); } - return Changed ? CS.getInstruction() : nullptr; + return Changed ? &CB : nullptr; } /// If the callee is a constexpr cast of a function, attempt to move the cast to /// the arguments of the call/invoke. 
-bool InstCombiner::transformConstExprCastCall(CallSite CS) { - auto *Callee = dyn_cast(CS.getCalledValue()->stripPointerCasts()); +bool InstCombiner::transformConstExprCastCall(CallBase &CB) { + auto *Callee = dyn_cast(CB.getCalledValue()->stripPointerCasts()); if (!Callee) return false; @@ -4261,11 +4260,11 @@ // prototype with the exception of pointee types. The code below doesn't // implement that, so we can't do this transform. // TODO: Do the transform if it only requires adding pointer casts. - if (CS.isMustTailCall()) + if (isa(CB) && cast(CB).isMustTailCall()) return false; - Instruction *Caller = CS.getInstruction(); - const AttributeList &CallerPAL = CS.getAttributes(); + Instruction *Caller = &CB; + const AttributeList &CallerPAL = CB.getAttributes(); // Okay, this is a cast from a function to a different type. Unless doing so // would cause a type conversion of one of our arguments, change this call to @@ -4296,7 +4295,7 @@ return false; // Attribute not compatible with transformed value. } - // If the callsite is an invoke instruction, and the return value is used by + // If the callbase is an invoke instruction, and the return value is used by // a PHI node in a successor, we cannot change the return type of the call // because there is no place to put the cast instruction (without breaking // the critical edge). Bail out in this case. 
@@ -4309,7 +4308,7 @@ return false; } - unsigned NumActualArgs = CS.arg_size(); + unsigned NumActualArgs = CB.arg_size(); unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs); // Prevent us turning: @@ -4324,7 +4323,7 @@ Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal)) return false; - CallSite::arg_iterator AI = CS.arg_begin(); + auto AI = CB.arg_begin(); for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) { Type *ParamTy = FT->getParamType(i); Type *ActTy = (*AI)->getType(); @@ -4336,7 +4335,7 @@ .overlaps(AttributeFuncs::typeIncompatible(ParamTy))) return false; // Attribute not compatible with transformed value. - if (CS.isInAllocaArgument(i)) + if (CB.isInAllocaArgument(i)) return false; // Cannot transform to and from inalloca. // If the parameter is passed as a byval argument, then we have to have a @@ -4361,7 +4360,7 @@ // If the callee is just a declaration, don't change the varargsness of the // call. We don't want to introduce a varargs call where one doesn't // already exist. - PointerType *APTy = cast(CS.getCalledValue()->getType()); + PointerType *APTy = cast(CB.getCalledValue()->getType()); if (FT->isVarArg()!=cast(APTy->getElementType())->isVarArg()) return false; @@ -4400,7 +4399,7 @@ // with the existing attributes. Wipe out any problematic attributes. 
RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy)); - AI = CS.arg_begin(); + AI = CB.arg_begin(); for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) { Type *ParamTy = FT->getParamType(i); @@ -4454,29 +4453,29 @@ Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs); SmallVector OpBundles; - CS.getOperandBundlesAsDefs(OpBundles); + CB.getOperandBundlesAsDefs(OpBundles); - CallSite NewCS; + CallBase *NewCB; if (InvokeInst *II = dyn_cast(Caller)) { - NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(), + NewCB = Builder.CreateInvoke(Callee, II->getNormalDest(), II->getUnwindDest(), Args, OpBundles); } else { - NewCS = Builder.CreateCall(Callee, Args, OpBundles); - cast(NewCS.getInstruction()) + NewCB = Builder.CreateCall(Callee, Args, OpBundles); + cast(NewCB) ->setTailCallKind(cast(Caller)->getTailCallKind()); } - NewCS->takeName(Caller); - NewCS.setCallingConv(CS.getCallingConv()); - NewCS.setAttributes(NewCallerPAL); + NewCB->takeName(Caller); + NewCB->setCallingConv(CB.getCallingConv()); + NewCB->setAttributes(NewCallerPAL); // Preserve the weight metadata for the new call instruction. The metadata // is used by SamplePGO to check callsite's hotness. uint64_t W; if (Caller->extractProfTotalWeight(W)) - NewCS->setProfWeight(W); + NewCB->setProfWeight(W); // Insert a cast of the return type as necessary. - Instruction *NC = NewCS.getInstruction(); + Instruction *NC = NewCB; Value *NV = NC; if (OldRetTy != NV->getType() && !Caller->use_empty()) { if (!NV->getType()->isVoidTy()) { @@ -4516,22 +4515,19 @@ /// Turn a call to a function created by init_trampoline / adjust_trampoline /// intrinsic pair into a direct call to the underlying function. 
Instruction * -InstCombiner::transformCallThroughTrampoline(CallSite CS, - IntrinsicInst *Tramp) { - Value *Callee = CS.getCalledValue(); +InstCombiner::transformCallThroughTrampoline(CallBase &CB, + IntrinsicInst &Tramp) { + Value *Callee = CB.getCalledValue(); PointerType *PTy = cast(Callee->getType()); FunctionType *FTy = cast(PTy->getElementType()); - AttributeList Attrs = CS.getAttributes(); + AttributeList Attrs = CB.getAttributes(); // If the call already has the 'nest' attribute somewhere then give up - // otherwise 'nest' would occur twice after splicing in the chain. if (Attrs.hasAttrSomewhere(Attribute::Nest)) return nullptr; - assert(Tramp && - "transformCallThroughTrampoline called with incorrect CallSite."); - - Function *NestF =cast(Tramp->getArgOperand(1)->stripPointerCasts()); + Function *NestF =cast(Tramp.getArgOperand(1)->stripPointerCasts()); FunctionType *NestFTy = cast(NestF->getValueType()); AttributeList NestAttrs = NestF->getAttributes(); @@ -4554,22 +4550,22 @@ } if (NestTy) { - Instruction *Caller = CS.getInstruction(); + Instruction *Caller = &CB; std::vector NewArgs; std::vector NewArgAttrs; - NewArgs.reserve(CS.arg_size() + 1); - NewArgAttrs.reserve(CS.arg_size()); + NewArgs.reserve(CB.arg_size() + 1); + NewArgAttrs.reserve(CB.arg_size()); // Insert the nest argument into the call argument list, which may // mean appending it. Likewise for attributes. { unsigned ArgNo = 0; - CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); + auto I = CB.arg_begin(), E = CB.arg_end(); do { if (ArgNo == NestArgNo) { // Add the chain argument and attributes. 
- Value *NestVal = Tramp->getArgOperand(2); + Value *NestVal = Tramp.getArgOperand(2); if (NestVal->getType() != NestTy) NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest"); NewArgs.push_back(NestVal); @@ -4631,7 +4627,7 @@ Attrs.getRetAttributes(), NewArgAttrs); SmallVector OpBundles; - CS.getOperandBundlesAsDefs(OpBundles); + CB.getOperandBundlesAsDefs(OpBundles); Instruction *NewCaller; if (InvokeInst *II = dyn_cast(Caller)) { @@ -4660,6 +4656,6 @@ Constant *NewCallee = NestF->getType() == PTy ? NestF : ConstantExpr::getBitCast(NestF, PTy); - CS.setCalledFunction(NewCallee); - return CS.getInstruction(); + CB.setCalledFunction(NewCallee); + return &CB; } Index: lib/Transforms/InstCombine/InstCombineInternal.h =================================================================== --- lib/Transforms/InstCombine/InstCombineInternal.h +++ lib/Transforms/InstCombine/InstCombineInternal.h @@ -52,7 +52,6 @@ class APInt; class AssumptionCache; -class CallSite; class DataLayout; class DominatorTree; class GEPOperator; @@ -467,11 +466,11 @@ Instruction &CtxI, Value *&OperationResult, Constant *&OverflowResult); - Instruction *visitCallSite(CallSite CS); + Instruction *visitCallBase(CallBase &CB); Instruction *tryOptimizeCall(CallInst *CI); - bool transformConstExprCastCall(CallSite CS); - Instruction *transformCallThroughTrampoline(CallSite CS, - IntrinsicInst *Tramp); + bool transformConstExprCastCall(CallBase &CB); + Instruction *transformCallThroughTrampoline(CallBase &CB, + IntrinsicInst &Tramp); /// Transform (zext icmp) to bitwise / integer operations in order to /// eliminate it. 
Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -88,29 +88,29 @@ continue; } - if (auto CS = CallSite(I)) { + if (auto *CB = dyn_cast<CallBase>(I)) { // If this is the function being called then we treat it like a load and // ignore it. - if (CS.isCallee(&U)) + if (CB->isCallee(&U)) continue; - unsigned DataOpNo = CS.getDataOperandNo(&U); - bool IsArgOperand = CS.isArgOperand(&U); + unsigned DataOpNo = CB->getDataOperandNo(&U); + bool IsArgOperand = CB->isArgOperand(&U); // Inalloca arguments are clobbered by the call. - if (IsArgOperand && CS.isInAllocaArgument(DataOpNo)) + if (IsArgOperand && CB->isInAllocaArgument(DataOpNo)) return false; // If this is a readonly/readnone call site, then we know it is just a // load (but one that potentially returns the value itself), so we can // ignore it if we know that the value isn't captured. - if (CS.onlyReadsMemory() && - (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo))) + if (CB->onlyReadsMemory() && + (CB->use_empty() || CB->doesNotCapture(DataOpNo))) continue; // If this is being passed as a byval argument, the caller is making a // copy, so it is only a read of the alloca. 
- if (IsArgOperand && CS.isByValArgument(DataOpNo)) + if (IsArgOperand && CB->isByValArgument(DataOpNo)) continue; } Index: lib/Transforms/InstCombine/InstCombineMulDivRem.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -1160,7 +1160,8 @@ IRBuilder<> B(&I); IRBuilder<>::FastMathFlagGuard FMFGuard(B); B.setFastMathFlags(I.getFastMathFlags()); - AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes(); + AttributeList Attrs = + cast<CallBase>(Op0)->getCalledFunction()->getAttributes(); Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf, LibFunc_tanl, B, Attrs); if (IsCot)