Index: include/llvm/IR/Instructions.h
===================================================================
--- include/llvm/IR/Instructions.h
+++ include/llvm/IR/Instructions.h
@@ -5043,6 +5043,26 @@
   }
 };
 
+/// A helper function that returns the pointer operand of a load or store
+/// instruction. Returns nullptr if not load or store.
+inline Value *getLoadStorePointerOperand(Value *V) {
+  if (auto *Load = dyn_cast<LoadInst>(V))
+    return Load->getPointerOperand();
+  if (auto *Store = dyn_cast<StoreInst>(V))
+    return Store->getPointerOperand();
+  return nullptr;
+}
+
+/// A helper function that returns the pointer operand of a load, store
+/// or GEP instruction. Returns nullptr if not load, store, or GEP.
+inline Value *getPointerOperand(Value *V) {
+  if (auto *Ptr = getLoadStorePointerOperand(V))
+    return Ptr;
+  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
+    return Gep->getPointerOperand();
+  return nullptr;
+}
+
 } // end namespace llvm
 
 #endif // LLVM_IR_INSTRUCTIONS_H
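For reference, a minimal usage sketch of the two new helpers (not part of the patch; collectLoadStorePointers is a hypothetical name used only for illustration):

    // Illustrative sketch only -- not part of this patch.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Collect the pointer operand of every load and store in a block.
    // getLoadStorePointerOperand returns nullptr for any other instruction,
    // so non-memory instructions are skipped without extra isa<> checks.
    static void collectLoadStorePointers(BasicBlock &BB,
                                         SmallVectorImpl<Value *> &Ptrs) {
      for (Instruction &I : BB)
        if (Value *Ptr = getLoadStorePointerOperand(&I))
          Ptrs.push_back(Ptr);
    }

getPointerOperand additionally looks through GEPs, covering the GEP case that the helper removed from Delinearization.cpp below used to handle.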
Index: lib/Analysis/Delinearization.cpp
===================================================================
--- lib/Analysis/Delinearization.cpp
+++ lib/Analysis/Delinearization.cpp
@@ -69,16 +69,6 @@
   return false;
 }
 
-static Value *getPointerOperand(Instruction &Inst) {
-  if (LoadInst *Load = dyn_cast<LoadInst>(&Inst))
-    return Load->getPointerOperand();
-  else if (StoreInst *Store = dyn_cast<StoreInst>(&Inst))
-    return Store->getPointerOperand();
-  else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst))
-    return Gep->getPointerOperand();
-  return nullptr;
-}
-
 void Delinearization::print(raw_ostream &O, const Module *) const {
   O << "Delinearization on function " << F->getName() << ":\n";
   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
@@ -93,7 +83,7 @@
     // Delinearize the memory access as analyzed in all the surrounding loops.
     // Do not analyze memory accesses outside loops.
     for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
-      const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
+      const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(Inst), L);
 
       const SCEVUnknown *BasePointer =
           dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFn));
Index: lib/Analysis/DependenceAnalysis.cpp
===================================================================
--- lib/Analysis/DependenceAnalysis.cpp
+++ lib/Analysis/DependenceAnalysis.cpp
@@ -643,17 +643,6 @@
 }
 
 
-static
-Value *getPointerOperand(Instruction *I) {
-  if (LoadInst *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  llvm_unreachable("Value is not load or store instruction");
-  return nullptr;
-}
-
-
 // Examines the loop nesting of the Src and Dst
 // instructions and establishes their shared loops. Sets the variables
 // CommonLevels, SrcLevels, and MaxLevels.
@@ -3176,8 +3165,10 @@
 /// for each loop level.
 bool DependenceInfo::tryDelinearize(Instruction *Src, Instruction *Dst,
                                     SmallVectorImpl<Subscript> &Pair) {
-  Value *SrcPtr = getPointerOperand(Src);
-  Value *DstPtr = getPointerOperand(Dst);
+  assert(isLoadOrStore(Src) && "instruction is not load or store");
+  assert(isLoadOrStore(Dst) && "instruction is not load or store");
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
+  Value *DstPtr = getLoadStorePointerOperand(Dst);
   Loop *SrcLoop = LI->getLoopFor(Src->getParent());
   Loop *DstLoop = LI->getLoopFor(Dst->getParent());
 
@@ -3302,8 +3293,10 @@
     return make_unique<Dependence>(Src, Dst);
   }
 
-  Value *SrcPtr = getPointerOperand(Src);
-  Value *DstPtr = getPointerOperand(Dst);
+  assert(isLoadOrStore(Src) && "instruction is not load or store");
+  assert(isLoadOrStore(Dst) && "instruction is not load or store");
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
+  Value *DstPtr = getLoadStorePointerOperand(Dst);
 
   switch (underlyingObjectsAlias(AA, F->getParent()->getDataLayout(),
                                  DstPtr, SrcPtr)) {
@@ -3720,8 +3713,8 @@
   assert(Dst->mayReadFromMemory() || Dst->mayWriteToMemory());
   assert(isLoadOrStore(Src));
   assert(isLoadOrStore(Dst));
-  Value *SrcPtr = getPointerOperand(Src);
-  Value *DstPtr = getPointerOperand(Dst);
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
+  Value *DstPtr = getLoadStorePointerOperand(Dst);
   assert(underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr,
                                 SrcPtr) == MustAlias);
 
Index: lib/Analysis/LoopAccessAnalysis.cpp
===================================================================
--- lib/Analysis/LoopAccessAnalysis.cpp
+++ lib/Analysis/LoopAccessAnalysis.cpp
@@ -1087,16 +1087,6 @@
   return Stride;
 }
 
-/// Take the pointer operand from the Load/Store instruction.
-/// Returns NULL if this is not a valid Load/Store instruction.
-static Value *getPointerOperand(Value *I) {
-  if (auto *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (auto *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  return nullptr;
-}
-
 /// Take the address space operand from the Load/Store instruction.
 /// Returns -1 if this is not a valid Load/Store instruction.
 static unsigned getAddressSpaceOperand(Value *I) {
@@ -1110,8 +1100,8 @@
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
-  Value *PtrA = getPointerOperand(A);
-  Value *PtrB = getPointerOperand(B);
+  Value *PtrA = getLoadStorePointerOperand(A);
+  Value *PtrB = getLoadStorePointerOperand(B);
   unsigned ASA = getAddressSpaceOperand(A);
   unsigned ASB = getAddressSpaceOperand(B);
 
Index: lib/Transforms/Scalar/EarlyCSE.cpp
===================================================================
--- lib/Transforms/Scalar/EarlyCSE.cpp
+++ lib/Transforms/Scalar/EarlyCSE.cpp
@@ -532,12 +532,7 @@
 
     Value *getPointerOperand() const {
       if (IsTargetMemInst) return Info.PtrVal;
-      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
-        return LI->getPointerOperand();
-      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
-        return SI->getPointerOperand();
-      }
-      return nullptr;
+      return getLoadStorePointerOperand(Inst);
     }
 
     bool mayReadFromMemory() const {
Index: lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -118,8 +118,6 @@
   bool run();
 
 private:
-  Value *getPointerOperand(Value *I) const;
-
   GetElementPtrInst *getSourceGEP(Value *Src) const;
 
   unsigned getPointerAddressSpace(Value *I);
@@ -271,14 +269,6 @@
   return Changed;
 }
 
-Value *Vectorizer::getPointerOperand(Value *I) const {
-  if (LoadInst *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  return nullptr;
-}
-
 unsigned Vectorizer::getPointerAddressSpace(Value *I) {
   if (LoadInst *L = dyn_cast<LoadInst>(I))
     return L->getPointerAddressSpace();
@@ -292,7 +282,7 @@
   // and without casts.
   // TODO: a stride set by the add instruction below can match the difference
   // in pointee type size here. Currently it will not be vectorized.
-  Value *SrcPtr = getPointerOperand(Src);
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
   Value *SrcBase = SrcPtr->stripPointerCasts();
   if (DL.getTypeStoreSize(SrcPtr->getType()->getPointerElementType()) ==
       DL.getTypeStoreSize(SrcBase->getType()->getPointerElementType()))
@@ -302,8 +292,8 @@
 
 // FIXME: Merge with llvm::isConsecutiveAccess
 bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
-  Value *PtrA = getPointerOperand(A);
-  Value *PtrB = getPointerOperand(B);
+  Value *PtrA = getLoadStorePointerOperand(A);
+  Value *PtrB = getLoadStorePointerOperand(B);
   unsigned ASA = getPointerAddressSpace(A);
   unsigned ASB = getPointerAddressSpace(B);
 
@@ -482,7 +472,7 @@
 void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
   SmallVector<Instruction *, 16> Instrs;
   for (Instruction *I : Chain) {
-    Value *PtrOperand = getPointerOperand(I);
+    Value *PtrOperand = getLoadStorePointerOperand(I);
     assert(PtrOperand && "Instruction must have a pointer operand.");
     Instrs.push_back(I);
     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
@@ -592,10 +582,10 @@
           dbgs() << "LSV: Found alias:\n"
                     "  Aliasing instruction and pointer:\n"
                  << "  " << *MemInstr << '\n'
-                 << "  " << *getPointerOperand(MemInstr) << '\n'
+                 << "  " << *getLoadStorePointerOperand(MemInstr) << '\n'
                  << "  Aliased instruction and pointer:\n"
                  << "  " << *ChainInstr << '\n'
-                 << "  " << *getPointerOperand(ChainInstr) << '\n';
+                 << "  " << *getLoadStorePointerOperand(ChainInstr) << '\n';
         });
         // Save this aliasing memory instruction as a barrier, but allow other
         // instructions that precede the barrier to be vectorized with this one.
Index: lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/LoopVectorize.cpp
+++ lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -296,16 +296,6 @@
 // in the project. They can be effectively organized in a common Load/Store
 // utilities unit.
 
-/// A helper function that returns the pointer operand of a load or store
-/// instruction.
-static Value *getPointerOperand(Value *I) {
-  if (auto *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (auto *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  return nullptr;
-}
-
 /// A helper function that returns the type of loaded or stored value.
 static Type *getMemInstValueType(Value *I) {
   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
@@ -2860,7 +2850,7 @@
     return;
 
   const DataLayout &DL = Instr->getModule()->getDataLayout();
-  Value *Ptr = getPointerOperand(Instr);
+  Value *Ptr = getLoadStorePointerOperand(Instr);
 
   // Prepare for the vector type of the interleaved load/store.
   Type *ScalarTy = getMemInstValueType(Instr);
@@ -3002,7 +2992,7 @@
 
   Type *ScalarDataTy = getMemInstValueType(Instr);
   Type *DataTy = VectorType::get(ScalarDataTy, VF);
-  Value *Ptr = getPointerOperand(Instr);
+  Value *Ptr = getLoadStorePointerOperand(Instr);
   unsigned Alignment = getMemInstAlignment(Instr);
   // An alignment of 0 means target abi alignment. We need to use the scalar's
   // target abi alignment in such a case.
@@ -4797,7 +4787,7 @@
       continue;
 
     for (Instruction &I : *BB)
-      if (auto *Ptr = getPointerOperand(&I))
+      if (auto *Ptr = getLoadStorePointerOperand(&I))
        SafePointes.insert(Ptr);
   }
 
@@ -5248,7 +5238,7 @@
     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
-    assert(Ptr == getPointerOperand(MemAccess) &&
+    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value or pointer operand");
     return WideningDecision != CM_GatherScatter;
   };
@@ -5416,7 +5406,7 @@
   case Instruction::Store: {
     if (!Legal->isMaskRequired(I))
       return false;
-    auto *Ptr = getPointerOperand(I);
+    auto *Ptr = getLoadStorePointerOperand(I);
     auto *Ty = getMemInstValueType(I);
     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr) ||
                                 isLegalMaskedGather(Ty))
@@ -5438,7 +5428,7 @@
   StoreInst *SI = dyn_cast<StoreInst>(I);
   assert((LI || SI) && "Invalid memory instruction");
 
-  auto *Ptr = getPointerOperand(I);
+  auto *Ptr = getLoadStorePointerOperand(I);
 
   // In order to be widened, the pointer should be consecutive, first of all.
   if (!Legal->isConsecutivePtr(Ptr))
@@ -5524,7 +5514,7 @@
   for (auto *BB : TheLoop->blocks())
     for (auto &I : *BB) {
       // If there's no pointer operand, there's nothing to do.
-      auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+      auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
       if (!Ptr)
         continue;
 
@@ -5532,7 +5522,7 @@
      // pointer operand.
       auto UsersAreMemAccesses =
           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
-            return getPointerOperand(U) == Ptr;
+            return getLoadStorePointerOperand(U) == Ptr;
           });
 
       // Ensure the memory instruction will not be scalarized or used by
@@ -5572,7 +5562,8 @@
       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
             auto *J = cast<Instruction>(U);
             return !TheLoop->contains(J) || Worklist.count(J) ||
-                   (OI == getPointerOperand(J) && isUniformDecision(J, VF));
+                   (OI == getLoadStorePointerOperand(J) &&
+                    isUniformDecision(J, VF));
           })) {
         Worklist.insert(OI);
         DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
@@ -5583,7 +5574,7 @@
   // Returns true if Ptr is the pointer operand of a memory access instruction
   // I, and I is known to not require scalarization.
   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
-    return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
+    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
   };
 
   // For an instruction to be added into Worklist above, all its users inside
@@ -5744,7 +5735,7 @@
       if (!LI && !SI)
         continue;
 
-      Value *Ptr = getPointerOperand(&I);
+      Value *Ptr = getLoadStorePointerOperand(&I);
       // We don't check wrapping here because we don't know yet if Ptr will be
       // part of a full group or a group with gaps. Checking wrapping for all
       // pointers (even those that end up in groups with no gaps) will be overly
@@ -5994,7 +5985,7 @@
     // So we check only group member 0 (which is always guaranteed to exist),
     // and group member Factor - 1; If the latter doesn't exist we rely on
     // peeling (if it is a non-reveresed accsess -- see Case 3).
-    Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
+    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                       /*ShouldCheckWrap=*/true)) {
       DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
@@ -6004,7 +5995,7 @@
     }
     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
     if (LastMember) {
-      Value *LastMemberPtr = getPointerOperand(LastMember);
+      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                         /*ShouldCheckWrap=*/true)) {
         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
@@ -6824,7 +6815,7 @@
 
   unsigned Alignment = getMemInstAlignment(I);
   unsigned AS = getMemInstAddressSpace(I);
-  Value *Ptr = getPointerOperand(I);
+  Value *Ptr = getLoadStorePointerOperand(I);
   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
 
   // Figure out whether the access is strided and get the stride value
@@ -6862,7 +6853,7 @@
   Type *ValTy = getMemInstValueType(I);
   Type *VectorTy = ToVectorTy(ValTy, VF);
   unsigned Alignment = getMemInstAlignment(I);
-  Value *Ptr = getPointerOperand(I);
+  Value *Ptr = getLoadStorePointerOperand(I);
   unsigned AS = getMemInstAddressSpace(I);
   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
 
@@ -6898,7 +6889,7 @@
   Type *ValTy = getMemInstValueType(I);
   Type *VectorTy = ToVectorTy(ValTy, VF);
   unsigned Alignment = getMemInstAlignment(I);
-  Value *Ptr = getPointerOperand(I);
+  Value *Ptr = getLoadStorePointerOperand(I);
 
   return TTI.getAddressComputationCost(VectorTy) +
          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
@@ -6982,7 +6973,7 @@
   for (BasicBlock *BB : TheLoop->blocks()) {
     // For each instruction in the old loop.
     for (Instruction &I : *BB) {
-      Value *Ptr = getPointerOperand(&I);
+      Value *Ptr = getLoadStorePointerOperand(&I);
       if (!Ptr)
         continue;
 
@@ -6998,7 +6989,8 @@
       // We assume that widening is the best solution when possible.
       if (memoryInstructionCanBeWidened(&I, VF)) {
         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
-        int ConsecutiveStride = Legal->isConsecutivePtr(getPointerOperand(&I));
+        int ConsecutiveStride =
+            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
                "Expected consecutive stride.");
         InstWidening Decision =
@@ -7068,7 +7060,7 @@
   for (BasicBlock *BB : TheLoop->blocks())
     for (Instruction &I : *BB) {
       Instruction *PtrDef =
-        dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+        dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
       if (PtrDef && TheLoop->contains(PtrDef) &&
           getWideningDecision(&I, VF) != CM_GatherScatter)
         AddrDefs.insert(PtrDef);
@@ -7382,7 +7374,7 @@
 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
   // Check if the pointer operand of a load or store instruction is
   // consecutive.
-  if (auto *Ptr = getPointerOperand(Inst))
+  if (auto *Ptr = getLoadStorePointerOperand(Inst))
     return Legal->isConsecutivePtr(Ptr);
   return false;
 }
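A note on the helper's failure mode, as reflected in the call sites above: the removed DependenceAnalysis helper hit llvm_unreachable on a non-load/store value, whereas the shared getLoadStorePointerOperand returns nullptr, which is why the DependenceAnalysis call sites now assert isLoadOrStore() before calling it. A caller that cannot guarantee the operand kind would instead branch on the result, roughly:

    // Hypothetical caller -- not part of this patch; 'process' is a placeholder.
    if (Value *Ptr = getLoadStorePointerOperand(MemInst))
      process(Ptr);  // MemInst is a load or store
    else
      return;        // not a load or store; nothing to do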