Index: llvm/include/llvm/Analysis/AliasAnalysis.h =================================================================== --- llvm/include/llvm/Analysis/AliasAnalysis.h +++ llvm/include/llvm/Analysis/AliasAnalysis.h @@ -43,7 +43,6 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/MemoryLocation.h" #include "llvm/Analysis/TargetLibraryInfo.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" @@ -381,15 +380,15 @@ /// \name Simple mod/ref information /// @{ - /// Get the ModRef info associated with a pointer argument of a callsite. The + /// Get the ModRef info associated with a pointer argument of a call. The /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note /// that these bits do not necessarily account for the overall behavior of /// the function, but rather only provide additional per-argument /// information. This never sets ModRefInfo::Must. - ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx); + ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx); /// Return the behavior of the given call site. - FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS); + FunctionModRefBehavior getModRefBehavior(const CallBase *Call); /// Return the behavior when calling the given function. FunctionModRefBehavior getModRefBehavior(const Function *F); @@ -405,8 +404,8 @@ /// property (e.g. calls to 'sin' and 'cos'). /// /// This property corresponds to the GCC 'const' attribute. - bool doesNotAccessMemory(ImmutableCallSite CS) { - return getModRefBehavior(CS) == FMRB_DoesNotAccessMemory; + bool doesNotAccessMemory(const CallBase *Call) { + return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory; } /// Checks if the specified function is known to never read or write memory. @@ -433,8 +432,8 @@ /// absence of interfering store instructions, such as CSE of strlen calls. 
/// /// This property corresponds to the GCC 'pure' attribute. - bool onlyReadsMemory(ImmutableCallSite CS) { - return onlyReadsMemory(getModRefBehavior(CS)); + bool onlyReadsMemory(const CallBase *Call) { + return onlyReadsMemory(getModRefBehavior(Call)); } /// Checks if the specified function is known to only read from non-volatile @@ -499,36 +498,12 @@ /// getModRefInfo (for call sites) - Return information about whether /// a particular call site modifies or reads the specified memory location. - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc); + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); /// getModRefInfo (for call sites) - A convenience wrapper. - ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P, - LocationSize Size) { - return getModRefInfo(CS, MemoryLocation(P, Size)); - } - - /// getModRefInfo (for calls) - Return information about whether - /// a particular call modifies or reads the specified memory location. - ModRefInfo getModRefInfo(const CallInst *C, const MemoryLocation &Loc) { - return getModRefInfo(ImmutableCallSite(C), Loc); - } - - /// getModRefInfo (for calls) - A convenience wrapper. - ModRefInfo getModRefInfo(const CallInst *C, const Value *P, + ModRefInfo getModRefInfo(const CallBase *Call, const Value *P, LocationSize Size) { - return getModRefInfo(C, MemoryLocation(P, Size)); - } - - /// getModRefInfo (for invokes) - Return information about whether - /// a particular invoke modifies or reads the specified memory location. - ModRefInfo getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) { - return getModRefInfo(ImmutableCallSite(I), Loc); - } - - /// getModRefInfo (for invokes) - A convenience wrapper. 
- ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P, - LocationSize Size) { - return getModRefInfo(I, MemoryLocation(P, Size)); + return getModRefInfo(Call, MemoryLocation(P, Size)); } /// getModRefInfo (for loads) - Return information about whether @@ -625,8 +600,8 @@ ModRefInfo getModRefInfo(const Instruction *I, const Optional &OptLoc) { if (OptLoc == None) { - if (auto CS = ImmutableCallSite(I)) { - return createModRefInfo(getModRefBehavior(CS)); + if (const auto *Call = dyn_cast(I)) { + return createModRefInfo(getModRefBehavior(Call)); } } @@ -660,12 +635,12 @@ /// Return information about whether a call and an instruction may refer to /// the same memory locations. - ModRefInfo getModRefInfo(Instruction *I, ImmutableCallSite Call); + ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call); /// Return information about whether two call sites may refer to the same set /// of memory locations. See the AA documentation for details: /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo - ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2); + ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2); /// Return information about whether a particular call site modifies /// or reads the specified memory location \p MemLoc before instruction \p I @@ -776,25 +751,25 @@ /// that these bits do not necessarily account for the overall behavior of /// the function, but rather only provide additional per-argument /// information. - virtual ModRefInfo getArgModRefInfo(ImmutableCallSite CS, + virtual ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) = 0; /// Return the behavior of the given call site. - virtual FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) = 0; + virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0; /// Return the behavior when calling the given function. 
virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0; /// getModRefInfo (for call sites) - Return information about whether /// a particular call site modifies or reads the specified memory location. - virtual ModRefInfo getModRefInfo(ImmutableCallSite CS, + virtual ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) = 0; /// Return information about whether two call sites may refer to the same set /// of memory locations. See the AA documentation for details: /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo - virtual ModRefInfo getModRefInfo(ImmutableCallSite CS1, - ImmutableCallSite CS2) = 0; + virtual ModRefInfo getModRefInfo(const CallBase *Call1, + const CallBase *Call2) = 0; /// @} }; @@ -826,26 +801,26 @@ return Result.pointsToConstantMemory(Loc, OrLocal); } - ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override { - return Result.getArgModRefInfo(CS, ArgIdx); + ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override { + return Result.getArgModRefInfo(Call, ArgIdx); } - FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override { - return Result.getModRefBehavior(CS); + FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override { + return Result.getModRefBehavior(Call); } FunctionModRefBehavior getModRefBehavior(const Function *F) override { return Result.getModRefBehavior(F); } - ModRefInfo getModRefInfo(ImmutableCallSite CS, + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) override { - return Result.getModRefInfo(CS, Loc); + return Result.getModRefInfo(Call, Loc); } - ModRefInfo getModRefInfo(ImmutableCallSite CS1, - ImmutableCallSite CS2) override { - return Result.getModRefInfo(CS1, CS2); + ModRefInfo getModRefInfo(const CallBase *Call1, + const CallBase *Call2) override { + return Result.getModRefInfo(Call1, Call2); } }; @@ -900,25 +875,28 @@ : CurrentResult.pointsToConstantMemory(Loc, OrLocal); } - ModRefInfo 
getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) { - return AAR ? AAR->getArgModRefInfo(CS, ArgIdx) : CurrentResult.getArgModRefInfo(CS, ArgIdx); + ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { + return AAR ? AAR->getArgModRefInfo(Call, ArgIdx) + : CurrentResult.getArgModRefInfo(Call, ArgIdx); } - FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) { - return AAR ? AAR->getModRefBehavior(CS) : CurrentResult.getModRefBehavior(CS); + FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { + return AAR ? AAR->getModRefBehavior(Call) + : CurrentResult.getModRefBehavior(Call); } FunctionModRefBehavior getModRefBehavior(const Function *F) { return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F); } - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { - return AAR ? AAR->getModRefInfo(CS, Loc) - : CurrentResult.getModRefInfo(CS, Loc); + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { + return AAR ? AAR->getModRefInfo(Call, Loc) + : CurrentResult.getModRefInfo(Call, Loc); } - ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) { - return AAR ? AAR->getModRefInfo(CS1, CS2) : CurrentResult.getModRefInfo(CS1, CS2); + ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) { + return AAR ? 
AAR->getModRefInfo(Call1, Call2) + : CurrentResult.getModRefInfo(Call1, Call2); } }; @@ -950,11 +928,11 @@ return false; } - ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) { + ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { return ModRefInfo::ModRef; } - FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) { + FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { return FMRB_UnknownModRefBehavior; } @@ -962,11 +940,11 @@ return FMRB_UnknownModRefBehavior; } - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { return ModRefInfo::ModRef; } - ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) { + ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) { return ModRefInfo::ModRef; } }; Index: llvm/include/llvm/Analysis/BasicAliasAnalysis.h =================================================================== --- llvm/include/llvm/Analysis/BasicAliasAnalysis.h +++ llvm/include/llvm/Analysis/BasicAliasAnalysis.h @@ -21,7 +21,7 @@ #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/MemoryLocation.h" -#include "llvm/IR/CallSite.h" +#include "llvm/IR/InstrTypes.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include @@ -84,18 +84,18 @@ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB); - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc); + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); - ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2); + ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2); /// Chases pointers until we find a (constant global) or not. bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal); /// Get the location associated with a pointer argument of a callsite. 
- ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx); + ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx); /// Returns the behavior when calling the given call site. - FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS); + FunctionModRefBehavior getModRefBehavior(const CallBase *Call); /// Returns the behavior when calling the given function. For use when the /// call site is not known. Index: llvm/include/llvm/Analysis/GlobalsModRef.h =================================================================== --- llvm/include/llvm/Analysis/GlobalsModRef.h +++ llvm/include/llvm/Analysis/GlobalsModRef.h @@ -88,7 +88,7 @@ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB); using AAResultBase::getModRefInfo; - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc); + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); /// getModRefBehavior - Return the behavior of the specified function if /// called from the specified call site. The call site may be null in which @@ -98,7 +98,7 @@ /// getModRefBehavior - Return the behavior of the specified function if /// called from the specified call site. The call site may be null in which /// case the most generic behavior of this function should be returned. 
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS); + FunctionModRefBehavior getModRefBehavior(const CallBase *Call); private: FunctionInfo *getFunctionInfo(const Function *F); @@ -113,7 +113,7 @@ void CollectSCCMembership(CallGraph &CG); bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V); - ModRefInfo getModRefInfoForArgument(ImmutableCallSite CS, + ModRefInfo getModRefInfoForArgument(const CallBase *Call, const GlobalValue *GV); }; Index: llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h =================================================================== --- llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h +++ llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h @@ -37,7 +37,6 @@ namespace llvm { class AssumptionCache; -class CallSite; class DominatorTree; class Function; class Instruction; @@ -398,7 +397,7 @@ /// invalidated on the next non-local query or when an instruction is /// removed. Clients must copy this data if they want it around longer than /// that. 
- const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS); + const NonLocalDepInfo &getNonLocalCallDependency(CallBase *QueryCall); /// Perform a full dependency query for an access to the QueryInst's /// specified memory location, returning the set of instructions that either @@ -482,9 +481,9 @@ void releaseMemory(); private: - MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall, - BasicBlock::iterator ScanIt, - BasicBlock *BB); + MemDepResult getCallDependencyFrom(CallBase *Call, bool isReadOnlyCall, + BasicBlock::iterator ScanIt, + BasicBlock *BB); bool getNonLocalPointerDepFromBB(Instruction *QueryInst, const PHITransAddr &Pointer, const MemoryLocation &Loc, bool isLoad, Index: llvm/include/llvm/Analysis/MemoryLocation.h =================================================================== --- llvm/include/llvm/Analysis/MemoryLocation.h +++ llvm/include/llvm/Analysis/MemoryLocation.h @@ -16,9 +16,9 @@ #ifndef LLVM_ANALYSIS_MEMORYLOCATION_H #define LLVM_ANALYSIS_MEMORYLOCATION_H -#include "llvm/ADT/Optional.h" #include "llvm/ADT/DenseMapInfo.h" -#include "llvm/IR/CallSite.h" +#include "llvm/ADT/Optional.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/Metadata.h" namespace llvm { @@ -231,11 +231,11 @@ static MemoryLocation getForDest(const AnyMemIntrinsic *MI); /// Return a location representing a particular argument of a call. 
- static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx, + static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo *TLI); - static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx, + static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo &TLI) { - return getForArgument(CS, ArgIdx, &TLI); + return getForArgument(Call, ArgIdx, &TLI); } explicit MemoryLocation(const Value *Ptr = nullptr, Index: llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h =================================================================== --- llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h +++ llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h @@ -60,7 +60,7 @@ FunctionModRefBehavior getModRefBehavior(const Function *F); using AAResultBase::getModRefInfo; - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc); + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); }; /// Analysis pass providing a never-invalidated alias analysis result. 
Index: llvm/include/llvm/Analysis/ScopedNoAliasAA.h =================================================================== --- llvm/include/llvm/Analysis/ScopedNoAliasAA.h +++ llvm/include/llvm/Analysis/ScopedNoAliasAA.h @@ -16,7 +16,7 @@ #define LLVM_ANALYSIS_SCOPEDNOALIASAA_H #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/IR/CallSite.h" +#include "llvm/IR/InstrTypes.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include @@ -41,8 +41,8 @@ } AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB); - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc); - ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2); + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); + ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2); private: bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const; Index: llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h =================================================================== --- llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h +++ llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h @@ -17,7 +17,7 @@ #define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/IR/CallSite.h" +#include "llvm/IR/InstrTypes.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include @@ -43,10 +43,10 @@ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB); bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal); - FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS); + FunctionModRefBehavior getModRefBehavior(const CallBase *Call); FunctionModRefBehavior getModRefBehavior(const Function *F); - ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc); - ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2); + ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); + ModRefInfo 
getModRefInfo(const CallBase *Call1, const CallBase *Call2); private: bool Aliases(const MDNode *A, const MDNode *B) const; Index: llvm/include/llvm/Analysis/ValueTracking.h =================================================================== --- llvm/include/llvm/Analysis/ValueTracking.h +++ llvm/include/llvm/Analysis/ValueTracking.h @@ -297,10 +297,10 @@ /// This function returns call pointer argument that is considered the same by /// aliasing rules. You CAN'T use it to replace one value with another. - const Value *getArgumentAliasingToReturnedPointer(ImmutableCallSite CS); - inline Value *getArgumentAliasingToReturnedPointer(CallSite CS) { - return const_cast( - getArgumentAliasingToReturnedPointer(ImmutableCallSite(CS))); + const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call); + inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call) { + return const_cast(getArgumentAliasingToReturnedPointer( + const_cast(Call))); } // {launder,strip}.invariant.group returns pointer that aliases its argument, @@ -309,7 +309,7 @@ // considered as capture. The arguments are not marked as returned neither, // because it would make it useless. bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( - ImmutableCallSite CS); + const CallBase *Call); /// This method strips off any GEP address adjustments and pointer casts from /// the specified value, returning the original object being addressed. 
Note Index: llvm/lib/Analysis/AliasAnalysis.cpp =================================================================== --- llvm/lib/Analysis/AliasAnalysis.cpp +++ llvm/lib/Analysis/AliasAnalysis.cpp @@ -40,7 +40,6 @@ #include "llvm/IR/Argument.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" @@ -118,11 +117,11 @@ return false; } -ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) { +ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { ModRefInfo Result = ModRefInfo::ModRef; for (const auto &AA : AAs) { - Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx)); + Result = intersectModRef(Result, AA->getArgModRefInfo(Call, ArgIdx)); // Early-exit the moment we reach the bottom of the lattice. if (isNoModRef(Result)) @@ -132,11 +131,11 @@ return Result; } -ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) { +ModRefInfo AAResults::getModRefInfo(Instruction *I, const CallBase *Call2) { // We may have two calls. - if (auto CS = ImmutableCallSite(I)) { + if (const auto *Call1 = dyn_cast(I)) { // Check if the two calls modify the same memory. - return getModRefInfo(CS, Call); + return getModRefInfo(Call1, Call2); } else if (I->isFenceLike()) { // If this is a fence, just return ModRef. return ModRefInfo::ModRef; @@ -146,19 +145,19 @@ // is that if the call references what this instruction // defines, it must be clobbered by this location. 
const MemoryLocation DefLoc = MemoryLocation::get(I); - ModRefInfo MR = getModRefInfo(Call, DefLoc); + ModRefInfo MR = getModRefInfo(Call2, DefLoc); if (isModOrRefSet(MR)) return setModAndRef(MR); } return ModRefInfo::NoModRef; } -ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS, +ModRefInfo AAResults::getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { ModRefInfo Result = ModRefInfo::ModRef; for (const auto &AA : AAs) { - Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc)); + Result = intersectModRef(Result, AA->getModRefInfo(Call, Loc)); // Early-exit the moment we reach the bottom of the lattice. if (isNoModRef(Result)) @@ -167,7 +166,7 @@ // Try to refine the mod-ref info further using other API entry points to the // aggregate set of AA results. - auto MRB = getModRefBehavior(CS); + auto MRB = getModRefBehavior(Call); if (MRB == FMRB_DoesNotAccessMemory || MRB == FMRB_OnlyAccessesInaccessibleMem) return ModRefInfo::NoModRef; @@ -181,15 +180,16 @@ bool IsMustAlias = true; ModRefInfo AllArgsMask = ModRefInfo::NoModRef; if (doesAccessArgPointees(MRB)) { - for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) { + for (auto AI = Call->arg_begin(), AE = Call->arg_end(); AI != AE; ++AI) { const Value *Arg = *AI; if (!Arg->getType()->isPointerTy()) continue; - unsigned ArgIdx = std::distance(CS.arg_begin(), AI); - MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI); + unsigned ArgIdx = std::distance(Call->arg_begin(), AI); + MemoryLocation ArgLoc = + MemoryLocation::getForArgument(Call, ArgIdx, TLI); AliasResult ArgAlias = alias(ArgLoc, Loc); if (ArgAlias != NoAlias) { - ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx); + ModRefInfo ArgMask = getArgModRefInfo(Call, ArgIdx); AllArgsMask = unionModRef(AllArgsMask, ArgMask); } // Conservatively clear IsMustAlias unless only MustAlias is found. 
@@ -213,12 +213,12 @@ return Result; } -ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1, - ImmutableCallSite CS2) { +ModRefInfo AAResults::getModRefInfo(const CallBase *Call1, + const CallBase *Call2) { ModRefInfo Result = ModRefInfo::ModRef; for (const auto &AA : AAs) { - Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2)); + Result = intersectModRef(Result, AA->getModRefInfo(Call1, Call2)); // Early-exit the moment we reach the bottom of the lattice. if (isNoModRef(Result)) @@ -228,59 +228,61 @@ // Try to refine the mod-ref info further using other API entry points to the // aggregate set of AA results. - // If CS1 or CS2 are readnone, they don't interact. - auto CS1B = getModRefBehavior(CS1); - if (CS1B == FMRB_DoesNotAccessMemory) + // If Call1 or Call2 are readnone, they don't interact. + auto Call1B = getModRefBehavior(Call1); + if (Call1B == FMRB_DoesNotAccessMemory) return ModRefInfo::NoModRef; - auto CS2B = getModRefBehavior(CS2); - if (CS2B == FMRB_DoesNotAccessMemory) + auto Call2B = getModRefBehavior(Call2); + if (Call2B == FMRB_DoesNotAccessMemory) return ModRefInfo::NoModRef; // If they both only read from memory, there is no dependence. - if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B)) + if (onlyReadsMemory(Call1B) && onlyReadsMemory(Call2B)) return ModRefInfo::NoModRef; - // If CS1 only reads memory, the only dependence on CS2 can be - // from CS1 reading memory written by CS2. - if (onlyReadsMemory(CS1B)) + // If Call1 only reads memory, the only dependence on Call2 can be + // from Call1 reading memory written by Call2. + if (onlyReadsMemory(Call1B)) Result = clearMod(Result); - else if (doesNotReadMemory(CS1B)) + else if (doesNotReadMemory(Call1B)) Result = clearRef(Result); - // If CS2 only access memory through arguments, accumulate the mod/ref - // information from CS1's references to the memory referenced by - // CS2's arguments. 
- if (onlyAccessesArgPointees(CS2B)) { - if (!doesAccessArgPointees(CS2B)) + // If Call2 only access memory through arguments, accumulate the mod/ref + // information from Call1's references to the memory referenced by + // Call2's arguments. + if (onlyAccessesArgPointees(Call2B)) { + if (!doesAccessArgPointees(Call2B)) return ModRefInfo::NoModRef; ModRefInfo R = ModRefInfo::NoModRef; bool IsMustAlias = true; - for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) { + for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) { const Value *Arg = *I; if (!Arg->getType()->isPointerTy()) continue; - unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I); - auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI); - - // ArgModRefCS2 indicates what CS2 might do to CS2ArgLoc, and the - // dependence of CS1 on that location is the inverse: - // - If CS2 modifies location, dependence exists if CS1 reads or writes. - // - If CS2 only reads location, dependence exists if CS1 writes. - ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx); + unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I); + auto Call2ArgLoc = + MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI); + + // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the + // dependence of Call1 on that location is the inverse: + // - If Call2 modifies location, dependence exists if Call1 reads or + // writes. + // - If Call2 only reads location, dependence exists if Call1 writes. + ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx); ModRefInfo ArgMask = ModRefInfo::NoModRef; - if (isModSet(ArgModRefCS2)) + if (isModSet(ArgModRefC2)) ArgMask = ModRefInfo::ModRef; - else if (isRefSet(ArgModRefCS2)) + else if (isRefSet(ArgModRefC2)) ArgMask = ModRefInfo::Mod; - // ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use + // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use // above ArgMask to update dependence info. 
- ModRefInfo ModRefCS1 = getModRefInfo(CS1, CS2ArgLoc); - ArgMask = intersectModRef(ArgMask, ModRefCS1); + ModRefInfo ModRefC1 = getModRefInfo(Call1, Call2ArgLoc); + ArgMask = intersectModRef(ArgMask, ModRefC1); // Conservatively clear IsMustAlias unless only MustAlias is found. - IsMustAlias &= isMustSet(ModRefCS1); + IsMustAlias &= isMustSet(ModRefC1); R = intersectModRef(unionModRef(R, ArgMask), Result); if (R == Result) { @@ -298,31 +300,32 @@ return IsMustAlias ? setMust(R) : clearMust(R); } - // If CS1 only accesses memory through arguments, check if CS2 references - // any of the memory referenced by CS1's arguments. If not, return NoModRef. - if (onlyAccessesArgPointees(CS1B)) { - if (!doesAccessArgPointees(CS1B)) + // If Call1 only accesses memory through arguments, check if Call2 references + // any of the memory referenced by Call1's arguments. If not, return NoModRef. + if (onlyAccessesArgPointees(Call1B)) { + if (!doesAccessArgPointees(Call1B)) return ModRefInfo::NoModRef; ModRefInfo R = ModRefInfo::NoModRef; bool IsMustAlias = true; - for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) { + for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) { const Value *Arg = *I; if (!Arg->getType()->isPointerTy()) continue; - unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I); - auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI); - - // ArgModRefCS1 indicates what CS1 might do to CS1ArgLoc; if CS1 might - // Mod CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If - // CS1 might Ref, then we care only about a Mod by CS2. 
- ModRefInfo ArgModRefCS1 = getArgModRefInfo(CS1, CS1ArgIdx); - ModRefInfo ModRefCS2 = getModRefInfo(CS2, CS1ArgLoc); - if ((isModSet(ArgModRefCS1) && isModOrRefSet(ModRefCS2)) || - (isRefSet(ArgModRefCS1) && isModSet(ModRefCS2))) - R = intersectModRef(unionModRef(R, ArgModRefCS1), Result); + unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I); + auto Call1ArgLoc = + MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI); + + // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1 + // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by + // Call2. If Call1 might Ref, then we care only about a Mod by Call2. + ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx); + ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc); + if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) || + (isRefSet(ArgModRefC1) && isModSet(ModRefC2))) + R = intersectModRef(unionModRef(R, ArgModRefC1), Result); // Conservatively clear IsMustAlias unless only MustAlias is found. - IsMustAlias &= isMustSet(ModRefCS2); + IsMustAlias &= isMustSet(ModRefC2); if (R == Result) { // On early exit, not all args were checked, cannot set Must. @@ -342,11 +345,11 @@ return Result; } -FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) { +FunctionModRefBehavior AAResults::getModRefBehavior(const CallBase *Call) { FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior; for (const auto &AA : AAs) { - Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS)); + Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(Call)); // Early-exit the moment we reach the bottom of the lattice. 
if (Result == FMRB_DoesNotAccessMemory) @@ -558,8 +561,8 @@ isa(Object)) return ModRefInfo::ModRef; - ImmutableCallSite CS(I); - if (!CS.getInstruction() || CS.getInstruction() == Object) + const auto *Call = dyn_cast(I); + if (!Call || Call == Object) return ModRefInfo::ModRef; if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true, @@ -572,14 +575,14 @@ ModRefInfo R = ModRefInfo::NoModRef; bool IsMustAlias = true; // Set flag only if no May found and all operands processed. - for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end(); + for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end(); CI != CE; ++CI, ++ArgNo) { // Only look at the no-capture or byval pointer arguments. If this // pointer were passed to arguments that were neither of these, then it // couldn't be no-capture. if (!(*CI)->getType()->isPointerTy() || - (!CS.doesNotCapture(ArgNo) && - ArgNo < CS.getNumArgOperands() && !CS.isByValArgument(ArgNo))) + (!Call->doesNotCapture(ArgNo) && ArgNo < Call->getNumArgOperands() && + !Call->isByValArgument(ArgNo))) continue; AliasResult AR = alias(MemoryLocation(*CI), MemoryLocation(Object)); @@ -591,9 +594,9 @@ IsMustAlias = false; if (AR == NoAlias) continue; - if (CS.doesNotAccessMemory(ArgNo)) + if (Call->doesNotAccessMemory(ArgNo)) continue; - if (CS.onlyReadsMemory(ArgNo)) { + if (Call->onlyReadsMemory(ArgNo)) { R = ModRefInfo::Ref; continue; } @@ -775,8 +778,8 @@ } bool llvm::isNoAliasCall(const Value *V) { - if (auto CS = ImmutableCallSite(V)) - return CS.hasRetAttr(Attribute::NoAlias); + if (const auto *Call = dyn_cast(V)) + return Call->hasRetAttr(Attribute::NoAlias); return false; } Index: llvm/lib/Analysis/AliasAnalysisEvaluator.cpp =================================================================== --- llvm/lib/Analysis/AliasAnalysisEvaluator.cpp +++ llvm/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -66,11 +66,10 @@ } } -static inline void PrintModRefResults(const char *Msg, bool P, CallSite CSA, - CallSite 
CSB, Module *M) { +static inline void PrintModRefResults(const char *Msg, bool P, CallBase *CallA, + CallBase *CallB, Module *M) { if (PrintAll || P) { - errs() << " " << Msg << ": " << *CSA.getInstruction() << " <-> " - << *CSB.getInstruction() << '\n'; + errs() << " " << Msg << ": " << *CallA << " <-> " << *CallB << '\n'; } } @@ -98,7 +97,7 @@ ++FunctionCount; SetVector Pointers; - SmallSetVector CallSites; + SmallSetVector Calls; SetVector Loads; SetVector Stores; @@ -114,16 +113,16 @@ if (EvalAAMD && isa(&*I)) Stores.insert(&*I); Instruction &Inst = *I; - if (auto CS = CallSite(&Inst)) { - Value *Callee = CS.getCalledValue(); + if (auto *Call = dyn_cast(&Inst)) { + Value *Callee = Call->getCalledValue(); // Skip actual functions for direct function calls. if (!isa(Callee) && isInterestingPointer(Callee)) Pointers.insert(Callee); // Consider formals. - for (Use &DataOp : CS.data_ops()) + for (Use &DataOp : Call->data_ops()) if (isInterestingPointer(DataOp)) Pointers.insert(DataOp); - CallSites.insert(CS); + Calls.insert(Call); } else { // Consider all operands. 
for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end(); @@ -136,7 +135,7 @@ if (PrintAll || PrintNoAlias || PrintMayAlias || PrintPartialAlias || PrintMustAlias || PrintNoModRef || PrintMod || PrintRef || PrintModRef) errs() << "Function: " << F.getName() << ": " << Pointers.size() - << " pointers, " << CallSites.size() << " call sites\n"; + << " pointers, " << Calls.size() << " call sites\n"; // iterate over the worklist, and run the full (n^2)/2 disambiguations for (SetVector::iterator I1 = Pointers.begin(), E = Pointers.end(); @@ -228,49 +227,47 @@ } // Mod/ref alias analysis: compare all pairs of calls and values - for (CallSite C : CallSites) { - Instruction *I = C.getInstruction(); - + for (CallBase *Call : Calls) { for (auto Pointer : Pointers) { uint64_t Size = MemoryLocation::UnknownSize; Type *ElTy = cast(Pointer->getType())->getElementType(); if (ElTy->isSized()) Size = DL.getTypeStoreSize(ElTy); - switch (AA.getModRefInfo(C, Pointer, Size)) { + switch (AA.getModRefInfo(Call, Pointer, Size)) { case ModRefInfo::NoModRef: - PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer, + PrintModRefResults("NoModRef", PrintNoModRef, Call, Pointer, F.getParent()); ++NoModRefCount; break; case ModRefInfo::Mod: - PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent()); + PrintModRefResults("Just Mod", PrintMod, Call, Pointer, F.getParent()); ++ModCount; break; case ModRefInfo::Ref: - PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent()); + PrintModRefResults("Just Ref", PrintRef, Call, Pointer, F.getParent()); ++RefCount; break; case ModRefInfo::ModRef: - PrintModRefResults("Both ModRef", PrintModRef, I, Pointer, + PrintModRefResults("Both ModRef", PrintModRef, Call, Pointer, F.getParent()); ++ModRefCount; break; case ModRefInfo::Must: - PrintModRefResults("Must", PrintMust, I, Pointer, F.getParent()); + PrintModRefResults("Must", PrintMust, Call, Pointer, F.getParent()); ++MustCount; break; case ModRefInfo::MustMod: - 
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, I, Pointer, + PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, Call, Pointer, F.getParent()); ++MustModCount; break; case ModRefInfo::MustRef: - PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, I, Pointer, + PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, Call, Pointer, F.getParent()); ++MustRefCount; break; case ModRefInfo::MustModRef: - PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, I, + PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, Call, Pointer, F.getParent()); ++MustModRefCount; break; @@ -279,44 +276,46 @@ } // Mod/ref alias analysis: compare all pairs of calls - for (auto C = CallSites.begin(), Ce = CallSites.end(); C != Ce; ++C) { - for (auto D = CallSites.begin(); D != Ce; ++D) { - if (D == C) + for (CallBase *CallA : Calls) { + for (CallBase *CallB : Calls) { + if (CallA == CallB) continue; - switch (AA.getModRefInfo(*C, *D)) { + switch (AA.getModRefInfo(CallA, CallB)) { case ModRefInfo::NoModRef: - PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent()); + PrintModRefResults("NoModRef", PrintNoModRef, CallA, CallB, + F.getParent()); ++NoModRefCount; break; case ModRefInfo::Mod: - PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent()); + PrintModRefResults("Just Mod", PrintMod, CallA, CallB, F.getParent()); ++ModCount; break; case ModRefInfo::Ref: - PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent()); + PrintModRefResults("Just Ref", PrintRef, CallA, CallB, F.getParent()); ++RefCount; break; case ModRefInfo::ModRef: - PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent()); + PrintModRefResults("Both ModRef", PrintModRef, CallA, CallB, + F.getParent()); ++ModRefCount; break; case ModRefInfo::Must: - PrintModRefResults("Must", PrintMust, *C, *D, F.getParent()); + PrintModRefResults("Must", PrintMust, CallA, CallB, F.getParent()); ++MustCount; break; case ModRefInfo::MustMod: - 
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, *C, *D, + PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, CallA, CallB, F.getParent()); ++MustModCount; break; case ModRefInfo::MustRef: - PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, *C, *D, + PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, CallA, CallB, F.getParent()); ++MustRefCount; break; case ModRefInfo::MustModRef: - PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, *C, *D, - F.getParent()); + PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, CallA, + CallB, F.getParent()); ++MustModRefCount; break; } Index: llvm/lib/Analysis/AliasSetTracker.cpp =================================================================== --- llvm/lib/Analysis/AliasSetTracker.cpp +++ llvm/lib/Analysis/AliasSetTracker.cpp @@ -16,7 +16,6 @@ #include "llvm/Analysis/GuardUtils.h" #include "llvm/Analysis/MemoryLocation.h" #include "llvm/Config/llvm-config.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" @@ -236,7 +235,8 @@ for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i) { if (auto *UnknownInst = getUnknownInst(i)) { - ImmutableCallSite C1(UnknownInst), C2(Inst); + const auto *C1 = dyn_cast(UnknownInst); + const auto *C2 = dyn_cast(Inst); if (!C1 || !C2 || isModOrRefSet(AA.getModRefInfo(C1, C2)) || isModOrRefSet(AA.getModRefInfo(C2, C1))) return true; @@ -446,44 +446,44 @@ return add(MTI); // Handle all calls with known mod/ref sets genericall - CallSite CS(I); - if (CS && CS.onlyAccessesArgMemory()) { - auto getAccessFromModRef = [](ModRefInfo MRI) { - if (isRefSet(MRI) && isModSet(MRI)) - return AliasSet::ModRefAccess; - else if (isModSet(MRI)) - return AliasSet::ModAccess; - else if (isRefSet(MRI)) - return AliasSet::RefAccess; - else - return AliasSet::NoAccess; - - }; - - ModRefInfo CallMask = createModRefInfo(AA.getModRefBehavior(CS)); - - // Some intrinsics are marked as modifying 
memory for control flow - // modelling purposes, but don't actually modify any specific memory - // location. - using namespace PatternMatch; - if (I->use_empty() && match(I, m_Intrinsic())) - CallMask = clearMod(CallMask); - - for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) { - const Value *Arg = *AI; - if (!Arg->getType()->isPointerTy()) - continue; - unsigned ArgIdx = std::distance(CS.arg_begin(), AI); - MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, - nullptr); - ModRefInfo ArgMask = AA.getArgModRefInfo(CS, ArgIdx); - ArgMask = intersectModRef(CallMask, ArgMask); - if (!isNoModRef(ArgMask)) - addPointer(ArgLoc, getAccessFromModRef(ArgMask)); + if (auto *Call = dyn_cast(I)) + if (Call->onlyAccessesArgMemory()) { + auto getAccessFromModRef = [](ModRefInfo MRI) { + if (isRefSet(MRI) && isModSet(MRI)) + return AliasSet::ModRefAccess; + else if (isModSet(MRI)) + return AliasSet::ModAccess; + else if (isRefSet(MRI)) + return AliasSet::RefAccess; + else + return AliasSet::NoAccess; + }; + + ModRefInfo CallMask = createModRefInfo(AA.getModRefBehavior(Call)); + + // Some intrinsics are marked as modifying memory for control flow + // modelling purposes, but don't actually modify any specific memory + // location. 
+ using namespace PatternMatch; + if (Call->use_empty() && + match(Call, m_Intrinsic())) + CallMask = clearMod(CallMask); + + for (auto IdxArgPair : enumerate(Call->args())) { + int ArgIdx = IdxArgPair.index(); + const Value *Arg = IdxArgPair.value(); + if (!Arg->getType()->isPointerTy()) + continue; + MemoryLocation ArgLoc = + MemoryLocation::getForArgument(Call, ArgIdx, nullptr); + ModRefInfo ArgMask = AA.getArgModRefInfo(Call, ArgIdx); + ArgMask = intersectModRef(CallMask, ArgMask); + if (!isNoModRef(ArgMask)) + addPointer(ArgLoc, getAccessFromModRef(ArgMask)); + } + return; } - return; - } - + return addUnknown(I); } Index: llvm/lib/Analysis/BasicAliasAnalysis.cpp =================================================================== --- llvm/lib/Analysis/BasicAliasAnalysis.cpp +++ llvm/lib/Analysis/BasicAliasAnalysis.cpp @@ -31,7 +31,6 @@ #include "llvm/Analysis/PhiValues.h" #include "llvm/IR/Argument.h" #include "llvm/IR/Attributes.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" @@ -134,7 +133,7 @@ /// Returns true if the pointer is one which would have been considered an /// escape by isNonEscapingLocalObject. static bool isEscapeSource(const Value *V) { - if (ImmutableCallSite(V)) + if (isa(V)) return true; if (isa(V)) @@ -436,7 +435,7 @@ const GEPOperator *GEPOp = dyn_cast(Op); if (!GEPOp) { - if (auto CS = ImmutableCallSite(V)) { + if (const auto *Call = dyn_cast(V)) { // CaptureTracking can know about special capturing properties of some // intrinsics like launder.invariant.group, that can't be expressed with // the attributes, but have properties like returning aliasing pointer. @@ -446,7 +445,7 @@ // because it should be in sync with CaptureTracking. Not using it may // cause weird miscompilations where 2 aliasing pointers are assumed to // noalias. 
- if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) { + if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) { V = RP; continue; } @@ -640,8 +639,8 @@ } /// Returns the behavior when calling the given call site. -FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) { - if (CS.doesNotAccessMemory()) +FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) { + if (Call->doesNotAccessMemory()) // Can't do better than this. return FMRB_DoesNotAccessMemory; @@ -649,23 +648,23 @@ // If the callsite knows it only reads memory, don't return worse // than that. - if (CS.onlyReadsMemory()) + if (Call->onlyReadsMemory()) Min = FMRB_OnlyReadsMemory; - else if (CS.doesNotReadMemory()) + else if (Call->doesNotReadMemory()) Min = FMRB_DoesNotReadMemory; - if (CS.onlyAccessesArgMemory()) + if (Call->onlyAccessesArgMemory()) Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees); - else if (CS.onlyAccessesInaccessibleMemory()) + else if (Call->onlyAccessesInaccessibleMemory()) Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem); - else if (CS.onlyAccessesInaccessibleMemOrArgMem()) + else if (Call->onlyAccessesInaccessibleMemOrArgMem()) Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem); - // If CS has operand bundles then aliasing attributes from the function it - // calls do not directly apply to the CallSite. This can be made more - // precise in the future. - if (!CS.hasOperandBundles()) - if (const Function *F = CS.getCalledFunction()) + // If the call has operand bundles then aliasing attributes from the function + // it calls do not directly apply to the call. This can be made more precise + // in the future. + if (!Call->hasOperandBundles()) + if (const Function *F = Call->getCalledFunction()) Min = FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F)); @@ -698,9 +697,9 @@ } /// Returns true if this is a writeonly (i.e Mod only) parameter. 
-static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx, +static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo &TLI) { - if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly)) + if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly)) return true; // We can bound the aliasing properties of memset_pattern16 just as we can @@ -710,7 +709,8 @@ // FIXME Consider handling this in InferFunctionAttr.cpp together with other // attributes. LibFunc F; - if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) && + if (Call->getCalledFunction() && + TLI.getLibFunc(*Call->getCalledFunction(), F) && F == LibFunc_memset_pattern16 && TLI.has(F)) if (ArgIdx == 0) return true; @@ -722,23 +722,23 @@ return false; } -ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS, +ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { // Checking for known builtin intrinsics and target library functions. - if (isWriteOnlyParam(CS, ArgIdx, TLI)) + if (isWriteOnlyParam(Call, ArgIdx, TLI)) return ModRefInfo::Mod; - if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly)) + if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly)) return ModRefInfo::Ref; - if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone)) + if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone)) return ModRefInfo::NoModRef; - return AAResultBase::getArgModRefInfo(CS, ArgIdx); + return AAResultBase::getArgModRefInfo(Call, ArgIdx); } -static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) { - const IntrinsicInst *II = dyn_cast(CS.getInstruction()); +static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) { + const IntrinsicInst *II = dyn_cast(Call); return II && II->getIntrinsicID() == IID; } @@ -794,9 +794,9 @@ /// Since we only look at local properties of this function, we really can't /// say much about this query. We do, however, use simple "address taken" /// analysis on local objects. 
-ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS, +ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { - assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) && + assert(notDifferentParent(Call, Loc.Ptr) && "AliasAnalysis query involving multiple functions!"); const Value *Object = GetUnderlyingObject(Loc.Ptr, DL); @@ -807,7 +807,7 @@ // contents of the alloca into argument registers or stack slots, so there is // no lifetime issue. if (isa(Object)) - if (const CallInst *CI = dyn_cast(CS.getInstruction())) + if (const CallInst *CI = dyn_cast(Call)) if (CI->isTailCall() && !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal)) return ModRefInfo::NoModRef; @@ -815,7 +815,7 @@ // If the pointer is to a locally allocated object that does not escape, // then the call can not mod/ref the pointer unless the call takes the pointer // as an argument, and itself doesn't capture it. - if (!isa(Object) && CS.getInstruction() != Object && + if (!isa(Object) && Call != Object && isNonEscapingLocalObject(Object)) { // Optimistically assume that call doesn't touch Object and check this @@ -824,19 +824,20 @@ bool IsMustAlias = true; unsigned OperandNo = 0; - for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end(); + for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end(); CI != CE; ++CI, ++OperandNo) { // Only look at the no-capture or byval pointer arguments. If this // pointer were passed to arguments that were neither of these, then it // couldn't be no-capture. if (!(*CI)->getType()->isPointerTy() || - (!CS.doesNotCapture(OperandNo) && - OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo))) + (!Call->doesNotCapture(OperandNo) && + OperandNo < Call->getNumArgOperands() && + !Call->isByValArgument(OperandNo))) continue; // Call doesn't access memory through this operand, so we don't care // if it aliases with Object. 
- if (CS.doesNotAccessMemory(OperandNo)) + if (Call->doesNotAccessMemory(OperandNo)) continue; // If this is a no-capture pointer argument, see if we can tell that it @@ -850,12 +851,12 @@ continue; // Operand aliases 'Object', but call doesn't modify it. Strengthen // initial assumption and keep looking in case if there are more aliases. - if (CS.onlyReadsMemory(OperandNo)) { + if (Call->onlyReadsMemory(OperandNo)) { Result = setRef(Result); continue; } // Operand aliases 'Object' but call only writes into it. - if (CS.doesNotReadMemory(OperandNo)) { + if (Call->doesNotReadMemory(OperandNo)) { Result = setMod(Result); continue; } @@ -879,17 +880,16 @@ } } - // If the CallSite is to malloc or calloc, we can assume that it doesn't + // If the call is to malloc or calloc, we can assume that it doesn't // modify any IR visible value. This is only valid because we assume these // routines do not read values visible in the IR. TODO: Consider special // casing realloc and strdup routines which access only their arguments as // well. Or alternatively, replace all of this with inaccessiblememonly once // that's implemented fully. - auto *Inst = CS.getInstruction(); - if (isMallocOrCallocLikeFn(Inst, &TLI)) { + if (isMallocOrCallocLikeFn(Call, &TLI)) { // Be conservative if the accessed pointer may alias the allocation - // fallback to the generic handling below. - if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias) + if (getBestAAResults().alias(MemoryLocation(Call), Loc) == NoAlias) return ModRefInfo::NoModRef; } @@ -897,7 +897,7 @@ // operands, i.e., source and destination of any given memcpy must no-alias. // If Loc must-aliases either one of these two locations, then it necessarily // no-aliases the other. 
- if (auto *Inst = dyn_cast(CS.getInstruction())) { + if (auto *Inst = dyn_cast(Call)) { AliasResult SrcAA, DestAA; if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst), @@ -921,7 +921,7 @@ // While the assume intrinsic is marked as arbitrarily writing so that // proper control dependencies will be maintained, it never aliases any // particular memory location. - if (isIntrinsicCall(CS, Intrinsic::assume)) + if (isIntrinsicCall(Call, Intrinsic::assume)) return ModRefInfo::NoModRef; // Like assumes, guard intrinsics are also marked as arbitrarily writing so @@ -931,7 +931,7 @@ // *Unlike* assumes, guard intrinsics are modeled as reading memory since the // heap state at the point the guard is issued needs to be consistent in case // the guard invokes the "deopt" continuation. - if (isIntrinsicCall(CS, Intrinsic::experimental_guard)) + if (isIntrinsicCall(Call, Intrinsic::experimental_guard)) return ModRefInfo::Ref; // Like assumes, invariant.start intrinsics were also marked as arbitrarily @@ -957,20 +957,20 @@ // The transformation will cause the second store to be ignored (based on // rules of invariant.start) and print 40, while the first program always // prints 50. - if (isIntrinsicCall(CS, Intrinsic::invariant_start)) + if (isIntrinsicCall(Call, Intrinsic::invariant_start)) return ModRefInfo::Ref; // The AAResultBase base class has some smarts, lets use them. - return AAResultBase::getModRefInfo(CS, Loc); + return AAResultBase::getModRefInfo(Call, Loc); } -ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1, - ImmutableCallSite CS2) { +ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1, + const CallBase *Call2) { // While the assume intrinsic is marked as arbitrarily writing so that // proper control dependencies will be maintained, it never aliases any // particular memory location. 
- if (isIntrinsicCall(CS1, Intrinsic::assume) || - isIntrinsicCall(CS2, Intrinsic::assume)) + if (isIntrinsicCall(Call1, Intrinsic::assume) || + isIntrinsicCall(Call2, Intrinsic::assume)) return ModRefInfo::NoModRef; // Like assumes, guard intrinsics are also marked as arbitrarily writing so @@ -984,18 +984,18 @@ // NB! This function is *not* commutative, so we specical case two // possibilities for guard intrinsics. - if (isIntrinsicCall(CS1, Intrinsic::experimental_guard)) - return isModSet(createModRefInfo(getModRefBehavior(CS2))) + if (isIntrinsicCall(Call1, Intrinsic::experimental_guard)) + return isModSet(createModRefInfo(getModRefBehavior(Call2))) ? ModRefInfo::Ref : ModRefInfo::NoModRef; - if (isIntrinsicCall(CS2, Intrinsic::experimental_guard)) - return isModSet(createModRefInfo(getModRefBehavior(CS1))) + if (isIntrinsicCall(Call2, Intrinsic::experimental_guard)) + return isModSet(createModRefInfo(getModRefBehavior(Call1))) ? ModRefInfo::Mod : ModRefInfo::NoModRef; // The AAResultBase base class has some smarts, lets use them. 
- return AAResultBase::getModRefInfo(CS1, CS2); + return AAResultBase::getModRefInfo(Call1, Call2); } /// Provide ad-hoc rules to disambiguate accesses through two GEP operators, Index: llvm/lib/Analysis/CaptureTracking.cpp =================================================================== --- llvm/lib/Analysis/CaptureTracking.cpp +++ llvm/lib/Analysis/CaptureTracking.cpp @@ -23,7 +23,6 @@ #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/OrderedBasicBlock.h" #include "llvm/Analysis/ValueTracking.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" @@ -239,11 +238,12 @@ switch (I->getOpcode()) { case Instruction::Call: case Instruction::Invoke: { - CallSite CS(I); + auto *Call = cast(I); // Not captured if the callee is readonly, doesn't return a copy through // its return value and doesn't unwind (a readonly function can leak bits // by throwing an exception or not depending on the input value). - if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy()) + if (Call->onlyReadsMemory() && Call->doesNotThrow() && + Call->getType()->isVoidTy()) break; // The pointer is not captured if returned pointer is not captured. @@ -251,14 +251,14 @@ // marked with nocapture do not capture. This means that places like // GetUnderlyingObject in ValueTracking or DecomposeGEPExpression // in BasicAA also need to know about this property. - if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS)) { - AddUses(I); + if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call)) { + AddUses(Call); break; } // Volatile operations effectively capture the memory location that they // load and store to. 
- if (auto *MI = dyn_cast(I)) + if (auto *MI = dyn_cast(Call)) if (MI->isVolatile()) if (Tracker->captured(U)) return; @@ -270,13 +270,14 @@ // that loading a value from a pointer does not cause the pointer to be // captured, even though the loaded value might be the pointer itself // (think of self-referential objects). - CallSite::data_operand_iterator B = - CS.data_operands_begin(), E = CS.data_operands_end(); - for (CallSite::data_operand_iterator A = B; A != E; ++A) - if (A->get() == V && !CS.doesNotCapture(A - B)) + for (auto IdxOpPair : enumerate(Call->data_ops())) { + int Idx = IdxOpPair.index(); + Value *A = IdxOpPair.value(); + if (A == V && !Call->doesNotCapture(Idx)) // The parameter is not marked 'nocapture' - captured. if (Tracker->captured(U)) return; + } break; } case Instruction::Load: Index: llvm/lib/Analysis/GlobalsModRef.cpp =================================================================== --- llvm/lib/Analysis/GlobalsModRef.cpp +++ llvm/lib/Analysis/GlobalsModRef.cpp @@ -255,11 +255,11 @@ } FunctionModRefBehavior -GlobalsAAResult::getModRefBehavior(ImmutableCallSite CS) { +GlobalsAAResult::getModRefBehavior(const CallBase *Call) { FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; - if (!CS.hasOperandBundles()) - if (const Function *F = CS.getCalledFunction()) + if (!Call->hasOperandBundles()) + if (const Function *F = Call->getCalledFunction()) if (FunctionInfo *FI = getFunctionInfo(F)) { if (!isModOrRefSet(FI->getModRefInfo())) Min = FMRB_DoesNotAccessMemory; @@ -267,7 +267,7 @@ Min = FMRB_OnlyReadsMemory; } - return FunctionModRefBehavior(AAResultBase::getModRefBehavior(CS) & Min); + return FunctionModRefBehavior(AAResultBase::getModRefBehavior(Call) & Min); } /// Returns the function info for the function, or null if we don't have @@ -366,14 +366,14 @@ } else if (Operator::getOpcode(I) == Instruction::BitCast) { if (AnalyzeUsesOfPointer(I, Readers, Writers, OkayStoreDest)) return true; - } else if (auto CS = CallSite(I)) { + } else 
if (auto *Call = dyn_cast(I)) { // Make sure that this is just the function being called, not that it is // passing into the function. - if (CS.isDataOperand(&U)) { + if (Call->isDataOperand(&U)) { // Detect calls to free. - if (CS.isArgOperand(&U) && isFreeCall(I, &TLI)) { + if (Call->isArgOperand(&U) && isFreeCall(I, &TLI)) { if (Writers) - Writers->insert(CS->getParent()->getParent()); + Writers->insert(Call->getParent()->getParent()); } else { return true; // Argument of an unknown call. } @@ -576,15 +576,15 @@ // We handle calls specially because the graph-relevant aspects are // handled above. - if (auto CS = CallSite(&I)) { - if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) { + if (auto *Call = dyn_cast(&I)) { + if (isAllocationFn(Call, &TLI) || isFreeCall(Call, &TLI)) { // FIXME: It is completely unclear why this is necessary and not // handled by the above graph code. FI.addModRefInfo(ModRefInfo::ModRef); - } else if (Function *Callee = CS.getCalledFunction()) { + } else if (Function *Callee = Call->getCalledFunction()) { // The callgraph doesn't include intrinsic calls. if (Callee->isIntrinsic()) { - if (isa(I)) + if (isa(Call)) // Don't let dbg intrinsics affect alias info. continue; @@ -885,16 +885,16 @@ return AAResultBase::alias(LocA, LocB); } -ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS, +ModRefInfo GlobalsAAResult::getModRefInfoForArgument(const CallBase *Call, const GlobalValue *GV) { - if (CS.doesNotAccessMemory()) + if (Call->doesNotAccessMemory()) return ModRefInfo::NoModRef; ModRefInfo ConservativeResult = - CS.onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef; + Call->onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef; // Iterate through all the arguments to the called function. If any argument // is based on GV, return the conservative result. 
- for (auto &A : CS.args()) { + for (auto &A : Call->args()) { SmallVector Objects; GetUnderlyingObjects(A, Objects, DL); @@ -914,7 +914,7 @@ return ModRefInfo::NoModRef; } -ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS, +ModRefInfo GlobalsAAResult::getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { ModRefInfo Known = ModRefInfo::ModRef; @@ -923,15 +923,15 @@ if (const GlobalValue *GV = dyn_cast(GetUnderlyingObject(Loc.Ptr, DL))) if (GV->hasLocalLinkage()) - if (const Function *F = CS.getCalledFunction()) + if (const Function *F = Call->getCalledFunction()) if (NonAddressTakenGlobals.count(GV)) if (const FunctionInfo *FI = getFunctionInfo(F)) Known = unionModRef(FI->getModRefInfoForGlobal(*GV), - getModRefInfoForArgument(CS, GV)); + getModRefInfoForArgument(Call, GV)); if (!isModOrRefSet(Known)) return ModRefInfo::NoModRef; // No need to query other mod/ref analyses - return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc)); + return intersectModRef(Known, AAResultBase::getModRefInfo(Call, Loc)); } GlobalsAAResult::GlobalsAAResult(const DataLayout &DL, Index: llvm/lib/Analysis/Loads.cpp =================================================================== --- llvm/lib/Analysis/Loads.cpp +++ llvm/lib/Analysis/Loads.cpp @@ -107,8 +107,8 @@ return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size, DL, CtxI, DT, Visited); - if (auto CS = ImmutableCallSite(V)) - if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) + if (const auto *Call = dyn_cast(V)) + if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT, Visited); Index: llvm/lib/Analysis/MemDepPrinter.cpp =================================================================== --- llvm/lib/Analysis/MemDepPrinter.cpp +++ llvm/lib/Analysis/MemDepPrinter.cpp @@ -13,7 +13,6 @@ #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include 
"llvm/Analysis/Passes.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/ErrorHandling.h" @@ -106,9 +105,9 @@ if (!Res.isNonLocal()) { Deps[Inst].insert(std::make_pair(getInstTypePair(Res), static_cast(nullptr))); - } else if (auto CS = CallSite(Inst)) { + } else if (auto *Call = dyn_cast(Inst)) { const MemoryDependenceResults::NonLocalDepInfo &NLDI = - MDA.getNonLocalCallDependency(CS); + MDA.getNonLocalCallDependency(Call); DepSet &InstDeps = Deps[Inst]; for (const NonLocalDepEntry &I : NLDI) { Index: llvm/lib/Analysis/MemoryDependenceAnalysis.cpp =================================================================== --- llvm/lib/Analysis/MemoryDependenceAnalysis.cpp +++ llvm/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -31,7 +31,6 @@ #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" @@ -182,8 +181,8 @@ } /// Private helper for finding the local dependencies of a call site. -MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom( - CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt, +MemDepResult MemoryDependenceResults::getCallDependencyFrom( + CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt, BasicBlock *BB) { unsigned Limit = BlockScanLimit; @@ -205,21 +204,21 @@ ModRefInfo MR = GetLocation(Inst, Loc, TLI); if (Loc.Ptr) { // A simple instruction. - if (isModOrRefSet(AA.getModRefInfo(CS, Loc))) + if (isModOrRefSet(AA.getModRefInfo(Call, Loc))) return MemDepResult::getClobber(Inst); continue; } - if (auto InstCS = CallSite(Inst)) { + if (auto *CallB = dyn_cast(Inst)) { // If these two calls do not interfere, look past it. 
- if (isNoModRef(AA.getModRefInfo(CS, InstCS))) { - // If the two calls are the same, return InstCS as a Def, so that - // CS can be found redundant and eliminated. + if (isNoModRef(AA.getModRefInfo(Call, CallB))) { + // If the two calls are the same, return Inst as a Def, so that + // Call can be found redundant and eliminated. if (isReadOnlyCall && !isModSet(MR) && - CS.getInstruction()->isIdenticalToWhenDefined(Inst)) + Call->isIdenticalToWhenDefined(CallB)) return MemDepResult::getDef(Inst); - // Otherwise if the two calls don't interact (e.g. InstCS is readnone) + // Otherwise if the two calls don't interact (e.g. CallB is readnone) // keep scanning. continue; } else @@ -750,11 +749,10 @@ LocalCache = getPointerDependencyFrom( MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst); - } else if (isa(QueryInst) || isa(QueryInst)) { - CallSite QueryCS(QueryInst); - bool isReadOnly = AA.onlyReadsMemory(QueryCS); - LocalCache = getCallSiteDependencyFrom( - QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent); + } else if (auto *QueryCall = dyn_cast(QueryInst)) { + bool isReadOnly = AA.onlyReadsMemory(QueryCall); + LocalCache = getCallDependencyFrom(QueryCall, isReadOnly, + ScanPos->getIterator(), QueryParent); } else // Non-memory instruction. LocalCache = MemDepResult::getUnknown(); @@ -780,11 +778,11 @@ #endif const MemoryDependenceResults::NonLocalDepInfo & -MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) { - assert(getDependency(QueryCS.getInstruction()).isNonLocal() && +MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) { + assert(getDependency(QueryCall).isNonLocal() && "getNonLocalCallDependency should only be used on calls with " "non-local deps!"); - PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()]; + PerInstNLInfo &CacheP = NonLocalDeps[QueryCall]; NonLocalDepInfo &Cache = CacheP.first; // This is the set of blocks that need to be recomputed. 
In the cached case, @@ -814,14 +812,14 @@ // << Cache.size() << " cached: " << *QueryInst; } else { // Seed DirtyBlocks with each of the preds of QueryInst's block. - BasicBlock *QueryBB = QueryCS.getInstruction()->getParent(); + BasicBlock *QueryBB = QueryCall->getParent(); for (BasicBlock *Pred : PredCache.get(QueryBB)) DirtyBlocks.push_back(Pred); ++NumUncacheNonLocal; } // isReadonlyCall - If this is a read-only call, we can be more aggressive. - bool isReadonlyCall = AA.onlyReadsMemory(QueryCS); + bool isReadonlyCall = AA.onlyReadsMemory(QueryCall); SmallPtrSet Visited; @@ -865,8 +863,8 @@ if (Instruction *Inst = ExistingResult->getResult().getInst()) { ScanPos = Inst->getIterator(); // We're removing QueryInst's use of Inst. - RemoveFromReverseMap(ReverseNonLocalDeps, Inst, - QueryCS.getInstruction()); + RemoveFromReverseMap(ReverseNonLocalDeps, Inst, + QueryCall); } } @@ -874,8 +872,7 @@ MemDepResult Dep; if (ScanPos != DirtyBB->begin()) { - Dep = - getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB); + Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB); } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) { // No dependence found. If this is the entry block of the function, it is // a clobber, otherwise it is unknown. @@ -897,7 +894,7 @@ // Keep the ReverseNonLocalDeps map up to date so we can efficiently // update this when we remove instructions. 
if (Instruction *Inst = Dep.getInst()) - ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction()); + ReverseNonLocalDeps[Inst].insert(QueryCall); } else { // If the block *is* completely transparent to the load, we need to check Index: llvm/lib/Analysis/MemoryLocation.cpp =================================================================== --- llvm/lib/Analysis/MemoryLocation.cpp +++ llvm/lib/Analysis/MemoryLocation.cpp @@ -121,15 +121,15 @@ return MemoryLocation(MI->getRawDest(), Size, AATags); } -MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS, +MemoryLocation MemoryLocation::getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo *TLI) { AAMDNodes AATags; - CS->getAAMetadata(AATags); - const Value *Arg = CS.getArgument(ArgIdx); + Call->getAAMetadata(AATags); + const Value *Arg = Call->getArgOperand(ArgIdx); // We may be able to produce an exact size for known intrinsics. - if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { + if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) { const DataLayout &DL = II->getModule()->getDataLayout(); switch (II->getIntrinsicID()) { @@ -178,18 +178,19 @@ // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16 // whenever possible. LibFunc F; - if (TLI && CS.getCalledFunction() && - TLI->getLibFunc(*CS.getCalledFunction(), F) && + if (TLI && Call->getCalledFunction() && + TLI->getLibFunc(*Call->getCalledFunction(), F) && F == LibFunc_memset_pattern16 && TLI->has(F)) { assert((ArgIdx == 0 || ArgIdx == 1) && "Invalid argument index for memset_pattern16"); if (ArgIdx == 1) return MemoryLocation(Arg, 16, AATags); - if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2))) + if (const ConstantInt *LenCI = + dyn_cast<ConstantInt>(Call->getArgOperand(2))) return MemoryLocation(Arg, LenCI->getZExtValue(), AATags); } // FIXME: Handle memset_pattern4 and memset_pattern8 also.
- return MemoryLocation(CS.getArgument(ArgIdx), LocationSize::unknown(), + return MemoryLocation(Call->getArgOperand(ArgIdx), LocationSize::unknown(), AATags); } Index: llvm/lib/Analysis/MemorySSA.cpp =================================================================== --- llvm/lib/Analysis/MemorySSA.cpp +++ llvm/lib/Analysis/MemorySSA.cpp @@ -30,7 +30,6 @@ #include "llvm/Config/llvm-config.h" #include "llvm/IR/AssemblyAnnotationWriter.h" #include "llvm/IR/BasicBlock.h" -#include "llvm/IR/CallSite.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instruction.h" @@ -131,9 +130,9 @@ : MemoryLocOrCall(MUD->getMemoryInst()) {} MemoryLocOrCall(Instruction *Inst) { - if (ImmutableCallSite(Inst)) { + if (auto *C = dyn_cast<CallBase>(Inst)) { IsCall = true; - CS = ImmutableCallSite(Inst); + Call = C; } else { IsCall = false; // There is no such thing as a memorylocation for a fence inst, and it is @@ -145,9 +144,9 @@ explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {} - ImmutableCallSite getCS() const { + const CallBase *getCall() const { assert(IsCall); - return CS; + return Call; } MemoryLocation getLoc() const { @@ -162,16 +161,17 @@ if (!IsCall) return Loc == Other.Loc; - if (CS.getCalledValue() != Other.CS.getCalledValue()) + if (Call->getCalledValue() != Other.Call->getCalledValue()) return false; - return CS.arg_size() == Other.CS.arg_size() && - std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin()); + return Call->arg_size() == Other.Call->arg_size() && + std::equal(Call->arg_begin(), Call->arg_end(), + Other.Call->arg_begin()); } private: union { - ImmutableCallSite CS; + const CallBase *Call; MemoryLocation Loc; }; }; @@ -197,9 +197,9 @@ hash_code hash = hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue( - MLOC.getCS().getCalledValue())); + MLOC.getCall()->getCalledValue())); - for (const Value *Arg : MLOC.getCS().args()) + for (const Value *Arg : MLOC.getCall()->args()) hash = hash_combine(hash,
DenseMapInfo<const Value *>::getHashValue(Arg)); return hash; } @@ -258,7 +258,7 @@ AliasAnalysis &AA) { Instruction *DefInst = MD->getMemoryInst(); assert(DefInst && "Defining instruction not actually an instruction"); - ImmutableCallSite UseCS(UseInst); + const auto *UseCall = dyn_cast<CallBase>(UseInst); Optional<AliasResult> AR; if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) { @@ -271,7 +271,7 @@ // context. switch (II->getIntrinsicID()) { case Intrinsic::lifetime_start: - if (UseCS) + if (UseCall) return {false, NoAlias}; AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc); return {AR != NoAlias, AR}; @@ -285,8 +285,8 @@ } } - if (UseCS) { - ModRefInfo I = AA.getModRefInfo(DefInst, UseCS); + if (UseCall) { + ModRefInfo I = AA.getModRefInfo(DefInst, UseCall); AR = isMustSet(I) ? MustAlias : MayAlias; return {isModOrRefSet(I), AR}; } @@ -336,7 +336,7 @@ UpwardsMemoryQuery() = default; UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access) - : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) { + : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) { if (!IsCall) StartingLoc = MemoryLocation::get(Inst); } @@ -2162,7 +2162,7 @@ // Conservatively, fences are always clobbers, so don't perform the walk if we // hit a fence. - if (!ImmutableCallSite(I) && I->isFenceLike()) + if (!isa<CallBase>(I) && I->isFenceLike()) return StartingUseOrDef; UpwardsMemoryQuery Q; @@ -2202,7 +2202,7 @@ // We can't sanely do anything with a fence, since they conservatively clobber // all memory, and have no locations to get pointers from to try to // disambiguate.
- if (!ImmutableCallSite(I) && I->isFenceLike()) + if (!isa<CallBase>(I) && I->isFenceLike()) return StartingAccess; UpwardsMemoryQuery Q(I, StartingAccess); Index: llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp =================================================================== --- llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp +++ llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp @@ -106,12 +106,12 @@ return AAResultBase::getModRefBehavior(F); } -ModRefInfo ObjCARCAAResult::getModRefInfo(ImmutableCallSite CS, +ModRefInfo ObjCARCAAResult::getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { if (!EnableARCOpts) - return AAResultBase::getModRefInfo(CS, Loc); + return AAResultBase::getModRefInfo(Call, Loc); - switch (GetBasicARCInstKind(CS.getInstruction())) { + switch (GetBasicARCInstKind(Call)) { case ARCInstKind::Retain: case ARCInstKind::RetainRV: case ARCInstKind::Autorelease: @@ -128,7 +128,7 @@ break; } - return AAResultBase::getModRefInfo(CS, Loc); + return AAResultBase::getModRefInfo(Call, Loc); } ObjCARCAAResult ObjCARCAA::run(Function &F, FunctionAnalysisManager &AM) { Index: llvm/lib/Analysis/ScopedNoAliasAA.cpp =================================================================== --- llvm/lib/Analysis/ScopedNoAliasAA.cpp +++ llvm/lib/Analysis/ScopedNoAliasAA.cpp @@ -95,39 +95,36 @@ return AAResultBase::alias(LocA, LocB); } -ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS, +ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { if (!EnableScopedNoAlias) - return AAResultBase::getModRefInfo(CS, Loc); + return AAResultBase::getModRefInfo(Call, Loc); - if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata( - LLVMContext::MD_noalias))) + if (!mayAliasInScopes(Loc.AATags.Scope, + Call->getMetadata(LLVMContext::MD_noalias))) return ModRefInfo::NoModRef; - if (!mayAliasInScopes( - CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), - Loc.AATags.NoAlias)) + if
(!mayAliasInScopes(Call->getMetadata(LLVMContext::MD_alias_scope), + Loc.AATags.NoAlias)) return ModRefInfo::NoModRef; - return AAResultBase::getModRefInfo(CS, Loc); + return AAResultBase::getModRefInfo(Call, Loc); } -ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS1, - ImmutableCallSite CS2) { +ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call1, + const CallBase *Call2) { if (!EnableScopedNoAlias) - return AAResultBase::getModRefInfo(CS1, CS2); + return AAResultBase::getModRefInfo(Call1, Call2); - if (!mayAliasInScopes( - CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), - CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias))) + if (!mayAliasInScopes(Call1->getMetadata(LLVMContext::MD_alias_scope), + Call2->getMetadata(LLVMContext::MD_noalias))) return ModRefInfo::NoModRef; - if (!mayAliasInScopes( - CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), - CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias))) + if (!mayAliasInScopes(Call2->getMetadata(LLVMContext::MD_alias_scope), + Call1->getMetadata(LLVMContext::MD_noalias))) return ModRefInfo::NoModRef; - return AAResultBase::getModRefInfo(CS1, CS2); + return AAResultBase::getModRefInfo(Call1, Call2); } static void collectMDInDomain(const MDNode *List, const MDNode *Domain, Index: llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp =================================================================== --- llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp +++ llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp @@ -399,20 +399,20 @@ } FunctionModRefBehavior -TypeBasedAAResult::getModRefBehavior(ImmutableCallSite CS) { +TypeBasedAAResult::getModRefBehavior(const CallBase *Call) { if (!EnableTBAA) - return AAResultBase::getModRefBehavior(CS); + return AAResultBase::getModRefBehavior(Call); FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; // If this is an "immutable" type, we can assume the call doesn't write // to memory. 
- if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) + if (const MDNode *M = Call->getMetadata(LLVMContext::MD_tbaa)) if ((!isStructPathTBAA(M) && TBAANode(M).isTypeImmutable()) || (isStructPathTBAA(M) && TBAAStructTagNode(M).isTypeImmutable())) Min = FMRB_OnlyReadsMemory; - return FunctionModRefBehavior(AAResultBase::getModRefBehavior(CS) & Min); + return FunctionModRefBehavior(AAResultBase::getModRefBehavior(Call) & Min); } FunctionModRefBehavior TypeBasedAAResult::getModRefBehavior(const Function *F) { @@ -420,33 +420,30 @@ return AAResultBase::getModRefBehavior(F); } -ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS, +ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { if (!EnableTBAA) - return AAResultBase::getModRefInfo(CS, Loc); + return AAResultBase::getModRefInfo(Call, Loc); if (const MDNode *L = Loc.AATags.TBAA) - if (const MDNode *M = - CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) + if (const MDNode *M = Call->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(L, M)) return ModRefInfo::NoModRef; - return AAResultBase::getModRefInfo(CS, Loc); + return AAResultBase::getModRefInfo(Call, Loc); } -ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS1, - ImmutableCallSite CS2) { +ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call1, + const CallBase *Call2) { if (!EnableTBAA) - return AAResultBase::getModRefInfo(CS1, CS2); + return AAResultBase::getModRefInfo(Call1, Call2); - if (const MDNode *M1 = - CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) - if (const MDNode *M2 = - CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) + if (const MDNode *M1 = Call1->getMetadata(LLVMContext::MD_tbaa)) + if (const MDNode *M2 = Call2->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(M1, M2)) return ModRefInfo::NoModRef; - return AAResultBase::getModRefInfo(CS1, CS2); + return AAResultBase::getModRefInfo(Call1, Call2); } bool 
MDNode::isTBAAVtableAccess() const { Index: llvm/lib/Analysis/ValueTracking.cpp =================================================================== --- llvm/lib/Analysis/ValueTracking.cpp +++ llvm/lib/Analysis/ValueTracking.cpp @@ -2023,10 +2023,10 @@ if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) return true; - if (auto CS = ImmutableCallSite(V)) { - if (CS.isReturnNonNull()) + if (const auto *Call = dyn_cast<CallBase>(V)) { + if (Call->isReturnNonNull()) return true; - if (const auto *RP = getArgumentAliasingToReturnedPointer(CS)) + if (const auto *RP = getArgumentAliasingToReturnedPointer(Call)) return isKnownNonZero(RP, Depth, Q); } } @@ -3617,21 +3617,21 @@ return Len == ~0ULL ? 1 : Len; } -const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) { - assert(CS && - "getArgumentAliasingToReturnedPointer only works on nonnull CallSite"); - if (const Value *RV = CS.getReturnedArgOperand()) +const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) { + assert(Call && + "getArgumentAliasingToReturnedPointer only works on nonnull calls"); + if (const Value *RV = Call->getReturnedArgOperand()) return RV; // This can be used only as a aliasing property. - if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS)) - return CS.getArgOperand(0); + if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call)) + return Call->getArgOperand(0); return nullptr; } bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( - ImmutableCallSite CS) { - return CS.getIntrinsicID() == Intrinsic::launder_invariant_group || - CS.getIntrinsicID() == Intrinsic::strip_invariant_group; + const CallBase *Call) { + return Call->getIntrinsicID() == Intrinsic::launder_invariant_group || + Call->getIntrinsicID() == Intrinsic::strip_invariant_group; } /// \p PN defines a loop-variant pointer to an object. Check if the @@ -3679,7 +3679,7 @@ // An alloca can't be further simplified.
return V; } else { - if (auto CS = CallSite(V)) { + if (auto *Call = dyn_cast<CallBase>(V)) { // CaptureTracking can know about special capturing properties of some // intrinsics like launder.invariant.group, that can't be expressed with // the attributes, but have properties like returning aliasing pointer. @@ -3689,7 +3689,7 @@ // because it should be in sync with CaptureTracking. Not using it may // cause weird miscompilations where 2 aliasing pointers are assumed to // noalias. - if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) { + if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) { V = RP; continue; } Index: llvm/lib/Transforms/IPO/FunctionAttrs.cpp =================================================================== --- llvm/lib/Transforms/IPO/FunctionAttrs.cpp +++ llvm/lib/Transforms/IPO/FunctionAttrs.cpp @@ -130,16 +130,15 @@ // Some instructions can be ignored even if they read or write memory. // Detect these now, skipping to the next instruction if one is found. - CallSite CS(cast<Value>(I)); - if (CS) { + if (auto *Call = dyn_cast<CallBase>(I)) { // Ignore calls to functions in the same SCC, as long as the call sites // don't have operand bundles. Calls with operand bundles are allowed to // have memory effects not described by the memory effects of the call // target. - if (!CS.hasOperandBundles() && CS.getCalledFunction() && - SCCNodes.count(CS.getCalledFunction())) + if (!Call->hasOperandBundles() && Call->getCalledFunction() && + SCCNodes.count(Call->getCalledFunction())) continue; - FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS); + FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call); ModRefInfo MRI = createModRefInfo(MRB); // If the call doesn't access memory, we're done. @@ -158,7 +157,7 @@ // Check whether all pointer arguments point to local memory, and // ignore calls that only access local memory.
- for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end(); + for (CallSite::arg_iterator CI = Call->arg_begin(), CE = Call->arg_end(); CI != CE; ++CI) { Value *Arg = *CI; if (!Arg->getType()->isPtrOrPtrVectorTy()) Index: llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp =================================================================== --- llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp +++ llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp @@ -45,18 +45,15 @@ default: break; } - ImmutableCallSite CS(Inst); - assert(CS && "Only calls can alter reference counts!"); + const auto *Call = cast<CallBase>(Inst); // See if AliasAnalysis can help us with the call. - FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS); + FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(Call); if (AliasAnalysis::onlyReadsMemory(MRB)) return false; if (AliasAnalysis::onlyAccessesArgPointees(MRB)) { const DataLayout &DL = Inst->getModule()->getDataLayout(); - for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); - I != E; ++I) { - const Value *Op = *I; + for (const Value *Op : Call->args()) { if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op, DL)) return true; Index: llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp =================================================================== --- llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -834,7 +834,7 @@ continue; } - if (auto CS = CallSite(&*BBI)) { + if (auto *Call = dyn_cast<CallBase>(&*BBI)) { // Remove allocation function calls from the list of dead stack objects; // there can't be any references before the definition. if (isAllocLikeFn(&*BBI, TLI)) @@ -842,15 +842,15 @@ // If this call does not access memory, it can't be loading any of our // pointers.
- if (AA->doesNotAccessMemory(CS)) + if (AA->doesNotAccessMemory(Call)) continue; // If the call might load from any of our allocas, then any store above // the call is live. DeadStackObjects.remove_if([&](Value *I) { // See if the call site touches the value. - return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI, - BB.getParent()))); + return isRefSet(AA->getModRefInfo( + Call, I, getPointerSize(I, DL, *TLI, BB.getParent()))); }); // If all of the allocas were clobbered by the call then we're not going Index: llvm/lib/Transforms/Scalar/GVN.cpp =================================================================== --- llvm/lib/Transforms/Scalar/GVN.cpp +++ llvm/lib/Transforms/Scalar/GVN.cpp @@ -437,7 +437,7 @@ // Non-local case. const MemoryDependenceResults::NonLocalDepInfo &deps = - MD->getNonLocalCallDependency(CallSite(C)); + MD->getNonLocalCallDependency(C); // FIXME: Move the checking logic to MemDep! CallInst* cdep = nullptr; Index: llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp =================================================================== --- llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp +++ llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp @@ -360,10 +360,11 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) { assert(I != nullptr && "Null instruction found!"); // Check function call safety - if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) { - LLVM_DEBUG(dbgs() << "    Unsafe call site found.\n"); - return false; - } + if (auto *Call = dyn_cast<CallBase>(I)) if (!AA->doesNotAccessMemory(Call)) { + LLVM_DEBUG(dbgs() << "    Unsafe call site found.\n"); + return false; + } // Avoid loops with possiblity of throw if (I->mayThrow()) { LLVM_DEBUG(dbgs() << "    May throw instruction found in loop body\n"); Index: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp =================================================================== --- llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -546,8 +546,8 @@ // Memory locations of lifted instructions. SmallVector<MemoryLocation, 8> MemLocs{StoreLoc}; - // Lifted callsites. - SmallVector<ImmutableCallSite, 8> CallSites; + // Lifted calls. + SmallVector<const CallBase *, 8> Calls; const MemoryLocation LoadLoc = MemoryLocation::get(LI); @@ -565,10 +565,9 @@ }); if (!NeedLift) - NeedLift = - llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) { - return isModOrRefSet(AA.getModRefInfo(C, CS)); - }); + NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) { + return isModOrRefSet(AA.getModRefInfo(C, Call)); + }); } if (!NeedLift) @@ -579,12 +578,12 @@ // none of them may modify its source. if (isModSet(AA.getModRefInfo(C, LoadLoc))) return false; - else if (auto CS = ImmutableCallSite(C)) { + else if (const auto *Call = dyn_cast<CallBase>(C)) { // If we can't lift this before P, it's game over. - if (isModOrRefSet(AA.getModRefInfo(P, CS))) + if (isModOrRefSet(AA.getModRefInfo(P, Call))) return false; - CallSites.push_back(CS); + Calls.push_back(Call); } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) { // If we can't lift this before P, it's game over. auto ML = MemoryLocation::get(C); Index: llvm/lib/Transforms/Scalar/Sink.cpp =================================================================== --- llvm/lib/Transforms/Scalar/Sink.cpp +++ llvm/lib/Transforms/Scalar/Sink.cpp @@ -76,14 +76,14 @@ Inst->mayThrow()) return false; - if (auto CS = CallSite(Inst)) { + if (auto *Call = dyn_cast<CallBase>(Inst)) { // Convergent operations cannot be made control-dependent on additional // values.
- if (CS.hasFnAttr(Attribute::Convergent)) + if (Call->hasFnAttr(Attribute::Convergent)) return false; for (Instruction *S : Stores) - if (isModSet(AA.getModRefInfo(S, CS))) + if (isModSet(AA.getModRefInfo(S, Call))) return false; } Index: llvm/lib/Transforms/Utils/InlineFunction.cpp =================================================================== --- llvm/lib/Transforms/Utils/InlineFunction.cpp +++ llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -987,22 +987,22 @@ PtrArgs.push_back(CXI->getPointerOperand()); else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) PtrArgs.push_back(RMWI->getPointerOperand()); - else if (ImmutableCallSite ICS = ImmutableCallSite(I)) { + else if (const auto *Call = dyn_cast<CallBase>(I)) { // If we know that the call does not access memory, then we'll still // know that about the inlined clone of this call site, and we don't // need to add metadata. - if (ICS.doesNotAccessMemory()) + if (Call->doesNotAccessMemory()) continue; IsFuncCall = true; if (CalleeAAR) { - FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS); + FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call); if (MRB == FMRB_OnlyAccessesArgumentPointees || MRB == FMRB_OnlyReadsArgumentPointees) IsArgMemOnlyCall = true; } - for (Value *Arg : ICS.args()) { + for (Value *Arg : Call->args()) { // We need to check the underlying objects of all arguments, not just // the pointer arguments, because we might be passing pointers as // integers, etc.