diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -4976,7 +4976,7 @@
 /// If we find an obviously non-foldable instruction, return true.
 /// Add accessed addresses and types to MemoryUses.
 static bool FindAllMemoryUses(
-    Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
+    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
     const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
     BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
@@ -4997,28 +4997,28 @@
     Instruction *UserI = cast<Instruction>(U.getUser());
     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
-      MemoryUses.push_back({U.get(), LI->getType()});
+      MemoryUses.push_back({&U, LI->getType()});
       continue;
     }
 
     if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
       if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
         return true; // Storing addr, not into addr.
-      MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()});
+      MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
       continue;
     }
 
     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
       if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
         return true; // Storing addr, not into addr.
-      MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()});
+      MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
       continue;
     }
 
     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
       if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
         return true; // Storing addr, not into addr.
-      MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()});
+      MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
       continue;
     }
@@ -5051,7 +5051,7 @@
 }
 
 static bool FindAllMemoryUses(
-    Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
+    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
     const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
     ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
   unsigned SeenInsts = 0;
@@ -5142,7 +5142,7 @@
   // we can remove the addressing mode and effectively trade one live register
   // for another (at worst.) In this context, folding an addressing mode into
   // the use is just a particularly nice way of sinking it.
-  SmallVector<std::pair<Value *, Type *>, 16> MemoryUses;
+  SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
   if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
     return false; // Has a non-memory, non-foldable use!
@@ -5156,8 +5156,9 @@
   // growth since most architectures have some reasonable small and fast way to
   // compute an effective address. (i.e LEA on x86)
   SmallVector<Instruction *, 32> MatchedAddrModeInsts;
-  for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
-    Value *Address = Pair.first;
+  for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
+    Value *Address = Pair.first->get();
+    Instruction *UserI = cast<Instruction>(Pair.first->getUser());
     Type *AddressAccessTy = Pair.second;
     unsigned AS = Address->getType()->getPointerAddressSpace();
@@ -5170,7 +5171,7 @@
     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
         TPT.getRestorationPoint();
     AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
-                                  AddressAccessTy, AS, MemoryInst, Result,
+                                  AddressAccessTy, AS, UserI, Result,
                                   InsertedInsts, PromotedInsts, TPT,
                                   LargeOffsetGEP, OptSize, PSI, BFI);
     Matcher.IgnoreProfitability = true;