Index: llvm/trunk/include/llvm/Target/TargetInstrInfo.h
===================================================================
--- llvm/trunk/include/llvm/Target/TargetInstrInfo.h
+++ llvm/trunk/include/llvm/Target/TargetInstrInfo.h
@@ -711,20 +711,22 @@
   /// Target-dependent implementation for foldMemoryOperand.
   /// Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              ArrayRef<unsigned> Ops,
-                                              int FrameIndex) const {
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *foldMemoryOperandImpl(
+      MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+      MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
     return nullptr;
   }
 
   /// Target-dependent implementation for foldMemoryOperand.
   /// Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              ArrayRef<unsigned> Ops,
-                                              MachineInstr *LoadMI) const {
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *foldMemoryOperandImpl(
+      MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+      MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
     return nullptr;
   }
 
Index: llvm/trunk/lib/CodeGen/TargetInstrInfo.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/TargetInstrInfo.cpp
+++ llvm/trunk/lib/CodeGen/TargetInstrInfo.cpp
@@ -471,11 +471,13 @@
       MI->getOpcode() == TargetOpcode::PATCHPOINT) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
+    if (NewMI)
+      MBB->insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
   }
-
+
   if (NewMI) {
     NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
     // Add a memory operand, foldMemoryOperandImpl doesn't do that.
@@ -493,8 +495,7 @@
                             MFI.getObjectAlignment(FI));
     NewMI->addMemOperand(MF, MMO);
 
-    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
-    return MBB->insert(MI, NewMI);
+    return NewMI;
   }
 
   // Straight COPY may fold as load/store.
@@ -539,15 +540,15 @@
       isLoadFromStackSlot(LoadMI, FrameIndex)) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+    if (NewMI)
+      NewMI = MBB.insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
   }
 
   if (!NewMI)
     return nullptr;
 
-  NewMI = MBB.insert(MI, NewMI);
-
   // Copy the memoperands from the load to the folded instruction.
   if (MI->memoperands_empty()) {
     NewMI->setMemRefs(LoadMI->memoperands_begin(),
Index: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.h
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.h
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.h
@@ -131,6 +131,7 @@
   using TargetInstrInfo::foldMemoryOperandImpl;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
 
   bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
Index: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2066,10 +2066,9 @@
       .setMIFlag(Flag);
 }
 
-MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr *MI,
-                                                      ArrayRef<unsigned> Ops,
-                                                      int FrameIndex) const {
+MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
   // This is a bit of a hack. Consider this instruction:
   //
   //   %vreg0 = COPY %SP; GPR64all:%vreg0
Index: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.h
===================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.h
+++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -114,10 +114,12 @@
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
 
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override {
     return nullptr;
   }
Index: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -779,10 +779,9 @@
   return false;
 }
 
-MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr *MI,
-                                                      ArrayRef<unsigned> Ops,
-                                                      int FI) const {
+MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FI) const {
   // Hexagon_TODO: Implement.
   return nullptr;
 }
Index: llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h
===================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h
+++ llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h
@@ -87,9 +87,11 @@
 protected:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override;
 
 public:
Index: llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -152,17 +152,15 @@
   return true;
 }
 
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                     MachineInstr *MI,
-                                                     ArrayRef<unsigned> Ops,
-                                                     int FrameIndex) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
 // TODO: Implement this function
   return nullptr;
 }
-MachineInstr *
-AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                       ArrayRef<unsigned> Ops,
-                                       MachineInstr *LoadMI) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
 // TODO: Implement this function
   return nullptr;
 }
Index: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h
===================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -187,9 +187,11 @@
                                       LiveVariables *LV) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override;
   bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
   bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
Index: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -752,10 +752,9 @@
   return nullptr;
 }
 
-MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr *MI,
-                                                      ArrayRef<unsigned> Ops,
-                                                      int FrameIndex) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   unsigned Size = MFI->getObjectSize(FrameIndex);
   unsigned Opcode = MI->getOpcode();
@@ -765,9 +764,11 @@
         isInt<8>(MI->getOperand(2).getImm()) &&
         !MI->getOperand(3).getReg()) {
       // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
-      return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
-        .addFrameIndex(FrameIndex).addImm(0)
-        .addImm(MI->getOperand(2).getImm());
+      return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                     get(SystemZ::AGSI))
+          .addFrameIndex(FrameIndex)
+          .addImm(0)
+          .addImm(MI->getOperand(2).getImm());
     }
     return nullptr;
   }
@@ -786,9 +787,11 @@
       isInt<8>(MI->getOperand(2).getImm())) {
     // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
     Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
-    return BuildMI(MF, MI->getDebugLoc(), get(Opcode))
-      .addFrameIndex(FrameIndex).addImm(0)
-      .addImm(MI->getOperand(2).getImm());
+    return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                   get(Opcode))
+        .addFrameIndex(FrameIndex)
+        .addImm(0)
+        .addImm(MI->getOperand(2).getImm());
   }
 
   if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
@@ -798,17 +801,23 @@
     // source register instead.
     if (OpNum == 0) {
       unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
-      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
-        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
-        .addImm(0).addReg(0);
+      return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                     get(StoreOpcode))
+          .addOperand(MI->getOperand(1))
+          .addFrameIndex(FrameIndex)
+          .addImm(0)
+          .addReg(0);
     }
     // If we're spilling the source of an LDGR or LGDR, load the
     // destination register instead.
     if (OpNum == 1) {
       unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
       unsigned Dest = MI->getOperand(0).getReg();
-      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
-        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
+      return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                     get(LoadOpcode), Dest)
+          .addFrameIndex(FrameIndex)
+          .addImm(0)
+          .addReg(0);
     }
   }
 
@@ -830,17 +839,25 @@
     if (MMO->getSize() == Size && !MMO->isVolatile()) {
       // Handle conversion of loads.
       if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
-        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
-          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
-          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
-          .addMemOperand(MMO);
+        return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                       get(SystemZ::MVC))
+            .addFrameIndex(FrameIndex)
+            .addImm(0)
+            .addImm(Size)
+            .addOperand(MI->getOperand(1))
+            .addImm(MI->getOperand(2).getImm())
+            .addMemOperand(MMO);
       }
       // Handle conversion of stores.
       if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
-        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
-          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
-          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
-          .addMemOperand(MMO);
+        return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                       get(SystemZ::MVC))
+            .addOperand(MI->getOperand(1))
+            .addImm(MI->getOperand(2).getImm())
+            .addImm(Size)
+            .addFrameIndex(FrameIndex)
+            .addImm(0)
+            .addMemOperand(MMO);
       }
     }
   }
@@ -856,7 +873,8 @@
     assert(AccessBytes != 0 && "Size of access should be known");
     assert(AccessBytes <= Size && "Access outside the frame index");
     uint64_t Offset = Size - AccessBytes;
-    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
+    MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
+                                      MI->getDebugLoc(), get(MemOpcode));
     for (unsigned I = 0; I < OpNum; ++I)
       MIB.addOperand(MI->getOperand(I));
     MIB.addFrameIndex(FrameIndex).addImm(Offset);
@@ -869,10 +887,9 @@
   return nullptr;
 }
 
-MachineInstr *
-SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                        ArrayRef<unsigned> Ops,
-                                        MachineInstr *LoadMI) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
   return nullptr;
 }
Index: llvm/trunk/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp
@@ -3530,9 +3530,9 @@
   SmallVector<MachineOperand, 4> AddrOps;
   AM.getFullAddress(AddrOps);
 
-  MachineInstr *Result =
-    XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps,
-                              Size, Alignment, /*AllowCommute=*/true);
+  MachineInstr *Result = XII.foldMemoryOperandImpl(
+      *FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
+      /*AllowCommute=*/true);
   if (!Result)
     return false;
 
@@ -3556,7 +3556,6 @@
   }
 
   Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
-  FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
   MI->eraseFromParent();
   return true;
 }
Index: llvm/trunk/lib/Target/X86/X86InstrInfo.h
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.h
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.h
@@ -307,6 +307,7 @@
   /// references has been changed.
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
@@ -314,6 +315,7 @@
   /// stack slot.
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override;
 
   /// canFoldMemoryOperand - Returns true if the specified load / store is
@@ -407,6 +409,7 @@
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       unsigned OpNum,
                                       ArrayRef<MachineOperand> MOs,
+                                      MachineBasicBlock::iterator InsertPt,
                                       unsigned Size, unsigned Alignment,
                                       bool AllowCommute) const;
Index: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
@@ -4703,8 +4703,17 @@
   return false;
 }
 
+static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs) {
+  unsigned NumAddrOps = MOs.size();
+  for (unsigned i = 0; i != NumAddrOps; ++i)
+    MIB.addOperand(MOs[i]);
+  if (NumAddrOps < 4) // FrameIndex only
+    addOffset(MIB, 0);
+}
+
 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                      ArrayRef<MachineOperand> MOs,
+                                     MachineBasicBlock::iterator InsertPt,
                                      MachineInstr *MI,
                                      const TargetInstrInfo &TII) {
   // Create the base instruction with the memory operand as the first part.
@@ -4712,11 +4721,7 @@
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                               MI->getDebugLoc(), true);
   MachineInstrBuilder MIB(MF, NewMI);
-  unsigned NumAddrOps = MOs.size();
-  for (unsigned i = 0; i != NumAddrOps; ++i)
-    MIB.addOperand(MOs[i]);
-  if (NumAddrOps < 4) // FrameIndex only
-    addOffset(MIB, 0);
+  addOperands(MIB, MOs);
 
   // Loop over the rest of the ri operands, converting them over.
   unsigned NumOps = MI->getDesc().getNumOperands()-2;
@@ -4728,11 +4733,16 @@
     MachineOperand &MO = MI->getOperand(i);
     MIB.addOperand(MO);
   }
+
+  MachineBasicBlock *MBB = InsertPt->getParent();
+  MBB->insert(InsertPt, NewMI);
+
   return MIB;
 }
 
 static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
                               unsigned OpNo, ArrayRef<MachineOperand> MOs,
+                              MachineBasicBlock::iterator InsertPt,
                               MachineInstr *MI, const TargetInstrInfo &TII) {
   // Omit the implicit operands, something BuildMI can't do.
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
@@ -4743,38 +4753,32 @@
     MachineOperand &MO = MI->getOperand(i);
     if (i == OpNo) {
       assert(MO.isReg() && "Expected to fold into reg operand!");
-      unsigned NumAddrOps = MOs.size();
-      for (unsigned i = 0; i != NumAddrOps; ++i)
-        MIB.addOperand(MOs[i]);
-      if (NumAddrOps < 4) // FrameIndex only
-        addOffset(MIB, 0);
+      addOperands(MIB, MOs);
     } else {
       MIB.addOperand(MO);
     }
   }
+
+  MachineBasicBlock *MBB = InsertPt->getParent();
+  MBB->insert(InsertPt, NewMI);
+
   return MIB;
 }
 
 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                 ArrayRef<MachineOperand> MOs,
+                                MachineBasicBlock::iterator InsertPt,
                                 MachineInstr *MI) {
-  MachineFunction &MF = *MI->getParent()->getParent();
-  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
-
-  unsigned NumAddrOps = MOs.size();
-  for (unsigned i = 0; i != NumAddrOps; ++i)
-    MIB.addOperand(MOs[i]);
-  if (NumAddrOps < 4) // FrameIndex only
-    addOffset(MIB, 0);
+  MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
+                                    MI->getDebugLoc(), TII.get(Opcode));
+  addOperands(MIB, MOs);
 
   return MIB.addImm(0);
 }
 
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                  MachineInstr *MI,
-                                                  unsigned OpNum,
-                                                  ArrayRef<MachineOperand> MOs,
-                                                  unsigned Size, unsigned Align,
-                                                  bool AllowCommute) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
+    ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
+    unsigned Size, unsigned Align, bool AllowCommute) const {
   const DenseMap<unsigned,
                  std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
   bool isCallRegIndirect = Subtarget.callRegIndirect();
@@ -4808,7 +4812,7 @@
       isTwoAddrFold = true;
   } else if (OpNum == 0) {
     if (MI->getOpcode() == X86::MOV32r0) {
-      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
+      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
       if (NewMI)
         return NewMI;
     }
@@ -4853,9 +4857,9 @@
   }
 
   if (isTwoAddrFold)
-    NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
+    NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
   else
-    NewMI = FuseInst(MF, Opcode, OpNum, MOs, MI, *this);
+    NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
 
   if (NarrowToMOV32rm) {
     // If this is the special case where we use a MOV32rm to load a 32-bit
@@ -4907,8 +4911,9 @@
       // Attempt to fold with the commuted version of the instruction.
       unsigned CommuteOp = (CommuteOpIdx1 == OriginalOpIdx ?
                             CommuteOpIdx2 : CommuteOpIdx1);
-      NewMI = foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, Size, Align,
-                                    /*AllowCommute=*/false);
+      NewMI =
+          foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, InsertPt, Size, Align,
+                                /*AllowCommute=*/false);
       if (NewMI)
         return NewMI;
 
@@ -5137,10 +5142,9 @@
     MI->addRegisterKilled(Reg, TRI, true);
 }
 
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                  MachineInstr *MI,
-                                                  ArrayRef<unsigned> Ops,
-                                                  int FrameIndex) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
   // Check switch flag
   if (NoFusing)
     return nullptr;
@@ -5179,8 +5183,8 @@
     return nullptr;
 
   return foldMemoryOperandImpl(MF, MI, Ops[0],
-                               MachineOperand::CreateFI(FrameIndex), Size,
-                               Alignment, /*AllowCommute=*/true);
+                               MachineOperand::CreateFI(FrameIndex), InsertPt,
+                               Size, Alignment, /*AllowCommute=*/true);
 }
 
 static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
@@ -5202,17 +5206,16 @@
   return false;
 }
 
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                  MachineInstr *MI,
-                                                  ArrayRef<unsigned> Ops,
-                                                  MachineInstr *LoadMI) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
   // If loading from a FrameIndex, fold directly from the FrameIndex.
   unsigned NumOps = LoadMI->getDesc().getNumOperands();
   int FrameIndex;
   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
     if (isPartialRegisterLoad(*LoadMI, MF))
       return nullptr;
-    return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
   }
 
   // Check switch flag
@@ -5332,7 +5335,7 @@
       break;
     }
   }
-  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs,
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
 }
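
Note for readers of this patch: after the change, a target's foldMemoryOperandImpl must build the folded instruction *and* link it into the block at InsertPt before returning; generic code in foldMemoryOperand no longer calls MBB->insert() on the result (only the target-independent foldPatchpoint path still inserts explicitly). Below is a minimal sketch, not part of the patch, of what an override looks like under the new contract. MyTargetInstrInfo and MYTGT::STRxi are hypothetical placeholder names.

MachineInstr *MyTargetInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  // Handle only the simplest case: spilling the result of a plain COPY.
  // Storing the COPY source straight to the slot makes the COPY dead.
  if (!MI->isCopy() || Ops.size() != 1 || Ops[0] != 0)
    return nullptr;

  // The (MBB, InsertPt, ...) form of BuildMI links the new instruction into
  // the block immediately, unlike the old BuildMI(MF, ...) form used before
  // this patch, which left the instruction dangling for the caller to insert.
  return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
                 get(MYTGT::STRxi))
      .addReg(MI->getOperand(1).getReg())
      .addFrameIndex(FrameIndex)
      .addImm(0);
}

This is why the SystemZ and X86 hunks above replace every BuildMI(MF, ...) call with the inserting BuildMI(*InsertPt->getParent(), InsertPt, ...) overload, and why X86FastISel passes FuncInfo.InsertPt instead of inserting the result itself.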