Index: lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
===================================================================
--- lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -226,7 +226,7 @@
   }
 
   /// Return true if MI is a member of the chain.
-  bool contains(MachineInstr *MI) { return Insts.count(MI) > 0; }
+  bool contains(MachineInstr &MI) { return Insts.count(&MI) > 0; }
 
   /// Return the number of instructions in the chain.
   unsigned size() const {
@@ -252,9 +252,10 @@
   MachineInstr *getKill() const { return KillInst; }
   /// Return an instruction that can be used as an iterator for the end
   /// of the chain. This is the maximum of KillInst (if set) and LastInst.
-  MachineBasicBlock::iterator getEnd() const {
+  MachineBasicBlock::iterator end() const {
     return ++MachineBasicBlock::iterator(KillInst ? KillInst : LastInst);
   }
+  MachineBasicBlock::iterator begin() const { return getStart(); }
 
   /// Can the Kill instruction (assuming one exists) be modified?
   bool isKillImmutable() const { return KillIsImmutable; }
@@ -504,8 +505,7 @@
   // of the chain?
   unsigned RegClassID = G->getStart()->getDesc().OpInfo[0].RegClass;
   BitVector AvailableRegs = RS.getRegsAvailable(TRI->getRegClass(RegClassID));
-  for (MachineBasicBlock::iterator I = G->getStart(), E = G->getEnd();
-       I != E; ++I) {
+  for (MachineBasicBlock::iterator I = G->begin(), E = G->end(); I != E; ++I) {
     RS.forward(I);
     AvailableRegs &= RS.getRegsAvailable(TRI->getRegClass(RegClassID));
 
@@ -558,16 +558,14 @@
   DEBUG(dbgs() << " - Scavenged register: " << TRI->getName(Reg) << "\n");
 
   std::map<unsigned, unsigned> Substs;
-  for (MachineBasicBlock::iterator I = G->getStart(), E = G->getEnd();
-       I != E; ++I) {
-    if (!G->contains(I) &&
-        (&*I != G->getKill() || G->isKillImmutable()))
+  for (MachineInstr &I : *G) {
+    if (!G->contains(I) && (&I != G->getKill() || G->isKillImmutable()))
       continue;
 
     // I is a member of G, or I is a mutable instruction that kills G.
 
     std::vector<unsigned> ToErase;
-    for (auto &U : I->operands()) {
+    for (auto &U : I.operands()) {
       if (U.isReg() && U.isUse() && Substs.find(U.getReg()) != Substs.end()) {
         unsigned OrigReg = U.getReg();
         U.setReg(Substs[OrigReg]);
@@ -587,11 +585,11 @@
       Substs.erase(J);
 
     // Only change the def if this isn't the last instruction.
-    if (&*I != G->getKill()) {
-      MachineOperand &MO = I->getOperand(0);
+    if (&I != G->getKill()) {
+      MachineOperand &MO = I.getOperand(0);
       bool Change = TransformAll || getColor(MO.getReg()) != C;
-      if (G->requiresFixup() && &*I == G->getLast())
+      if (G->requiresFixup() && &I == G->getLast())
         Change = false;
 
       if (Change) {
Index: lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
===================================================================
--- lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -76,12 +76,12 @@
   // isProfitableToTransform - Predicate function to determine whether an
   // instruction should be transformed to its equivalent AdvSIMD scalar
   // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
-  bool isProfitableToTransform(const MachineInstr *MI) const;
+  bool isProfitableToTransform(const MachineInstr &MI) const;
 
   // transformInstruction - Perform the transformation of an instruction
   // to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
   // to be the correct register class, minimizing cross-class copies.
-  void transformInstruction(MachineInstr *MI);
+  void transformInstruction(MachineInstr &MI);
 
   // processMachineBasicBlock - Main optimzation loop.
   bool processMachineBasicBlock(MachineBasicBlock *MBB);
@@ -189,16 +189,16 @@
   return Opc;
 }
 
-static bool isTransformable(const MachineInstr *MI) {
-  unsigned Opc = MI->getOpcode();
+static bool isTransformable(const MachineInstr &MI) {
+  unsigned Opc = MI.getOpcode();
   return Opc != getTransformOpcode(Opc);
 }
 
 // isProfitableToTransform - Predicate function to determine whether an
 // instruction should be transformed to its equivalent AdvSIMD scalar
 // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
-bool
-AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
+bool AArch64AdvSIMDScalar::isProfitableToTransform(
+    const MachineInstr &MI) const {
   // If this instruction isn't eligible to be transformed (no SIMD equivalent),
   // early exit since that's the common case.
   if (!isTransformable(MI))
@@ -209,8 +209,8 @@
   unsigned NumNewCopies = 3;
   unsigned NumRemovableCopies = 0;
 
-  unsigned OrigSrc0 = MI->getOperand(1).getReg();
-  unsigned OrigSrc1 = MI->getOperand(2).getReg();
+  unsigned OrigSrc0 = MI.getOperand(1).getReg();
+  unsigned OrigSrc1 = MI.getOperand(2).getReg();
   unsigned SubReg0;
   unsigned SubReg1;
   if (!MRI->def_empty(OrigSrc0)) {
@@ -244,14 +244,14 @@
   // any of the uses is a transformable instruction, it's likely the tranforms
   // will chain, enabling us to save a copy there, too. This is an aggressive
   // heuristic that approximates the graph based cost analysis described above.
-  unsigned Dst = MI->getOperand(0).getReg();
+  unsigned Dst = MI.getOperand(0).getReg();
   bool AllUsesAreCopies = true;
   for (MachineRegisterInfo::use_instr_nodbg_iterator
            Use = MRI->use_instr_nodbg_begin(Dst),
            E = MRI->use_instr_nodbg_end();
        Use != E; ++Use) {
     unsigned SubReg;
-    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(&*Use))
+    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(*Use))
       ++NumRemovableCopies;
     // If the use is an INSERT_SUBREG, that's still something that can
     // directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
@@ -279,12 +279,11 @@
   return TransformAll;
 }
 
-static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr *MI,
+static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
                                 unsigned Dst, unsigned Src, bool IsKill) {
-  MachineInstrBuilder MIB =
-      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AArch64::COPY),
-              Dst)
-          .addReg(Src, getKillRegState(IsKill));
+  MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+                                    TII->get(AArch64::COPY), Dst)
+                                .addReg(Src, getKillRegState(IsKill));
   DEBUG(dbgs() << " adding copy: " << *MIB);
   ++NumCopiesInserted;
   return MIB;
@@ -293,17 +292,17 @@
 // transformInstruction - Perform the transformation of an instruction
 // to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
 // to be the correct register class, minimizing cross-class copies.
-void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
-  DEBUG(dbgs() << "Scalar transform: " << *MI);
+void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
+  DEBUG(dbgs() << "Scalar transform: " << MI);
 
-  MachineBasicBlock *MBB = MI->getParent();
-  unsigned OldOpc = MI->getOpcode();
+  MachineBasicBlock *MBB = MI.getParent();
+  unsigned OldOpc = MI.getOpcode();
   unsigned NewOpc = getTransformOpcode(OldOpc);
   assert(OldOpc != NewOpc && "transform an instruction to itself?!");
 
   // Check if we need a copy for the source registers.
-  unsigned OrigSrc0 = MI->getOperand(1).getReg();
-  unsigned OrigSrc1 = MI->getOperand(2).getReg();
+  unsigned OrigSrc0 = MI.getOperand(1).getReg();
+  unsigned OrigSrc1 = MI.getOperand(2).getReg();
   unsigned Src0 = 0, SubReg0;
   unsigned Src1 = 0, SubReg1;
   bool KillSrc0 = false, KillSrc1 = false;
@@ -368,17 +367,17 @@
   // For now, all of the new instructions have the same simple three-register
   // form, so no need to special case based on what instruction we're
   // building.
-  BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(NewOpc), Dst)
+  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(NewOpc), Dst)
       .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
       .addReg(Src1, getKillRegState(KillSrc1), SubReg1);
 
   // Now copy the result back out to a GPR.
   // FIXME: Try to avoid this if all uses could actually just use the FPR64
   // directly.
-  insertCopy(TII, MI, MI->getOperand(0).getReg(), Dst, true);
+  insertCopy(TII, MI, MI.getOperand(0).getReg(), Dst, true);
 
   // Erase the old instruction.
-  MI->eraseFromParent();
+  MI.eraseFromParent();
 
   ++NumScalarInsnsUsed;
 }
@@ -387,8 +386,7 @@
 bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
   bool Changed = false;
   for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
-    MachineInstr *MI = I;
-    ++I;
+    MachineInstr &MI = *I++;
     if (isProfitableToTransform(MI)) {
       transformInstruction(MI);
       Changed = true;
Index: lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
===================================================================
--- lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
+++ lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -72,9 +72,9 @@
           break;
 
         if (TLSBaseAddrReg)
-          I = replaceTLSBaseAddrCall(I, TLSBaseAddrReg);
+          I = replaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
         else
-          I = setRegister(I, &TLSBaseAddrReg);
+          I = setRegister(*I, &TLSBaseAddrReg);
         Changed = true;
         break;
       default:
@@ -92,27 +92,27 @@
 
   // Replace the TLS_base_addr instruction I with a copy from
   // TLSBaseAddrReg, returning the new instruction.
-  MachineInstr *replaceTLSBaseAddrCall(MachineInstr *I,
+  MachineInstr *replaceTLSBaseAddrCall(MachineInstr &I,
                                        unsigned TLSBaseAddrReg) {
-    MachineFunction *MF = I->getParent()->getParent();
+    MachineFunction *MF = I.getParent()->getParent();
     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
 
     // Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
     // code sequence assumes the address will be.
-    MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
-                                 TII->get(TargetOpcode::COPY),
-                                 AArch64::X0).addReg(TLSBaseAddrReg);
+    MachineInstr *Copy = BuildMI(*I.getParent(), I, I.getDebugLoc(),
+                                 TII->get(TargetOpcode::COPY), AArch64::X0)
+                             .addReg(TLSBaseAddrReg);
 
     // Erase the TLS_base_addr instruction.
-    I->eraseFromParent();
+    I.eraseFromParent();
 
     return Copy;
   }
 
   // Create a virtal register in *TLSBaseAddrReg, and populate it by
   // inserting a copy instruction after I. Returns the new instruction.
-  MachineInstr *setRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
-    MachineFunction *MF = I->getParent()->getParent();
+  MachineInstr *setRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
+    MachineFunction *MF = I.getParent()->getParent();
     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
 
     // Create a virtual register for the TLS base address.
@@ -121,7 +121,7 @@
 
     // Insert a copy from X0 to TLSBaseAddrReg for later.
     MachineInstr *Copy =
-        BuildMI(*I->getParent(), ++I->getIterator(), I->getDebugLoc(),
+        BuildMI(*I.getParent(), ++I.getIterator(), I.getDebugLoc(),
                 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
             .addReg(AArch64::X0);
Index: lib/Target/AArch64/AArch64ConditionOptimizer.cpp
===================================================================
--- lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -173,7 +173,7 @@
       DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
       return nullptr;
     }
-    return I;
+    return &*I;
   }
   // Prevent false positive case like:
   // cmp w19, #0
Index: lib/Target/AArch64/AArch64ConditionalCompares.cpp
===================================================================
--- lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -304,7 +304,7 @@
   case AArch64::CBNZW:
   case AArch64::CBNZX:
     // These can be converted into a ccmp against #0.
-    return I;
+    return &*I;
   }
   ++NumCmpTermRejs;
   DEBUG(dbgs() << "Flags not used by terminator: " << *I);
@@ -335,7 +335,7 @@
   case AArch64::ADDSWrr:
   case AArch64::ADDSXrr:
     if (isDeadDef(I->getOperand(0).getReg()))
-      return I;
+      return &*I;
     DEBUG(dbgs() << "Can't convert compare with live destination: " << *I);
     ++NumLiveDstRejs;
     return nullptr;
@@ -343,7 +343,7 @@
   case AArch64::FCMPDrr:
   case AArch64::FCMPESrr:
   case AArch64::FCMPEDrr:
-    return I;
+    return &*I;
   }
 
   // Check for flag reads and clobbers.
Index: lib/Target/AArch64/AArch64FrameLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64FrameLowering.cpp
+++ lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -384,9 +384,9 @@
 
 // Fixup callee-save register save/restore instructions to take into account
 // combined SP bump by adding the local stack size to the stack offsets.
-static void fixupCalleeSaveRestoreStackOffset(MachineInstr *MI,
+static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                               unsigned LocalStackSize) {
-  unsigned Opc = MI->getOpcode();
+  unsigned Opc = MI.getOpcode();
   (void)Opc;
   assert((Opc == AArch64::STPXi || Opc == AArch64::STPDi ||
           Opc == AArch64::STRXui || Opc == AArch64::STRDui ||
@@ -394,11 +394,11 @@
           Opc == AArch64::LDRXui || Opc == AArch64::LDRDui) &&
          "Unexpected callee-save save/restore opcode!");
 
-  unsigned OffsetIdx = MI->getNumExplicitOperands() - 1;
-  assert(MI->getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
+  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
+  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
   // Last operand is immediate offset that needs fixing.
-  MachineOperand &OffsetOpnd = MI->getOperand(OffsetIdx);
+  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
   // All generated opcodes have scaled offsets.
   assert(LocalStackSize % 8 == 0);
   OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / 8);
@@ -477,7 +477,7 @@
   MachineBasicBlock::iterator End = MBB.end();
   while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup)) {
     if (CombineSPBump)
-      fixupCalleeSaveRestoreStackOffset(MBBI, AFI->getLocalStackSize());
+      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize());
     ++MBBI;
   }
   if (HasFP) {
@@ -724,7 +724,7 @@
       ++LastPopI;
       break;
     } else if (CombineSPBump)
-      fixupCalleeSaveRestoreStackOffset(LastPopI, AFI->getLocalStackSize());
+      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize());
   }
 
   // If there is a single SP update, insert it before the ret and we're done.
Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -104,7 +104,7 @@
     return false;
 
   // Get the last instruction in the block.
-  MachineInstr *LastInst = I;
+  MachineInstr *LastInst = &*I;
 
   // If there is only one terminator instruction, process it.
   unsigned LastOpc = LastInst->getOpcode();
@@ -122,7 +122,7 @@
   }
 
   // Get the instruction before it if it is a terminator.
-  MachineInstr *SecondLastInst = I;
+  MachineInstr *SecondLastInst = &*I;
   unsigned SecondLastOpc = SecondLastInst->getOpcode();
 
   // If AllowModify is true and the block ends with two or more unconditional
@@ -137,7 +137,7 @@
       TBB = LastInst->getOperand(0).getMBB();
       return false;
     } else {
-      SecondLastInst = I;
+      SecondLastInst = &*I;
       SecondLastOpc = SecondLastInst->getOpcode();
     }
   }
@@ -846,10 +846,9 @@
 
   // From must be above To.
   assert(std::find_if(MachineBasicBlock::reverse_iterator(To),
-                      To->getParent()->rend(),
-                      [From](MachineInstr &MI) {
-                        return &MI == From;
-                      }) != To->getParent()->rend());
+                      To->getParent()->rend(), [From](MachineInstr &MI) {
+                        return MachineBasicBlock::iterator(MI) == From;
+                      }) != To->getParent()->rend());
 
   // We iterate backward starting \p To until we hit \p From.
   for (--To; To != From; --To) {
Index: lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
===================================================================
--- lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -143,7 +143,7 @@
 
   // Find an instruction that updates the base register of the ld/st
   // instruction.
-  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
+  bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                             unsigned BaseReg, int Offset);
 
   // Merge a pre- or post-index base register update into a ld/st instruction.
@@ -179,8 +179,8 @@
 INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                 AARCH64_LOAD_STORE_OPT_NAME, false, false)
 
-static unsigned getBitExtrOpcode(MachineInstr *MI) {
-  switch (MI->getOpcode()) {
+static unsigned getBitExtrOpcode(MachineInstr &MI) {
+  switch (MI.getOpcode()) {
   default:
     llvm_unreachable("Unexpected opcode.");
   case AArch64::LDRBBui:
@@ -224,8 +224,8 @@
   }
 }
 
-static bool isNarrowLoad(MachineInstr *MI) {
-  return isNarrowLoad(MI->getOpcode());
+static bool isNarrowLoad(MachineInstr &MI) {
+  return isNarrowLoad(MI.getOpcode());
 }
 
 static bool isNarrowLoadOrStore(unsigned Opc) {
@@ -233,8 +233,8 @@
 }
 
 // Scaling factor for unscaled load or store.
-static int getMemScale(MachineInstr *MI) {
-  switch (MI->getOpcode()) {
+static int getMemScale(MachineInstr &MI) {
+  switch (MI.getOpcode()) {
   default:
     llvm_unreachable("Opcode has unknown scale!");
   case AArch64::LDRBBui:
@@ -414,10 +414,10 @@
   }
 }
 
-static unsigned isMatchingStore(MachineInstr *LoadInst,
-                                MachineInstr *StoreInst) {
-  unsigned LdOpc = LoadInst->getOpcode();
-  unsigned StOpc = StoreInst->getOpcode();
+static unsigned isMatchingStore(MachineInstr &LoadInst,
+                                MachineInstr &StoreInst) {
+  unsigned LdOpc = LoadInst.getOpcode();
+  unsigned StOpc = StoreInst.getOpcode();
   switch (LdOpc) {
   default:
     llvm_unreachable("Unsupported load instruction!");
@@ -562,8 +562,8 @@
   }
 }
 
-static bool isPairedLdSt(const MachineInstr *MI) {
-  switch (MI->getOpcode()) {
+static bool isPairedLdSt(const MachineInstr &MI) {
+  switch (MI.getOpcode()) {
   default:
     return false;
   case AArch64::LDPSi:
@@ -581,33 +581,33 @@
   }
 }
 
-static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
+static const MachineOperand &getLdStRegOp(const MachineInstr &MI,
                                           unsigned PairedRegOp = 0) {
   assert(PairedRegOp < 2 && "Unexpected register operand idx.");
   unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
-  return MI->getOperand(Idx);
+  return MI.getOperand(Idx);
 }
 
-static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
+static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
   unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
-  return MI->getOperand(Idx);
+  return MI.getOperand(Idx);
 }
 
-static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
+static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
   unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
-  return MI->getOperand(Idx);
+  return MI.getOperand(Idx);
 }
 
-static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
-                                  MachineInstr *StoreInst,
+static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
+                                  MachineInstr &StoreInst,
                                   const AArch64InstrInfo *TII) {
   assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
   int LoadSize = getMemScale(LoadInst);
   int StoreSize = getMemScale(StoreInst);
-  int UnscaledStOffset = TII->isUnscaledLdSt(*StoreInst)
+  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                              ? getLdStOffsetOp(StoreInst).getImm()
                              : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
-  int UnscaledLdOffset = TII->isUnscaledLdSt(*LoadInst)
+  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                              ? getLdStOffsetOp(LoadInst).getImm()
                              : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
   return (UnscaledStOffset <= UnscaledLdOffset) &&
@@ -618,11 +618,11 @@
   return isNarrowStore(Opc) || Opc == AArch64::STRWui || Opc == AArch64::STURWi;
 }
 
-static bool isPromotableZeroStoreOpcode(MachineInstr *MI) {
-  return isPromotableZeroStoreOpcode(MI->getOpcode());
+static bool isPromotableZeroStoreOpcode(MachineInstr &MI) {
+  return isPromotableZeroStoreOpcode(MI.getOpcode());
 }
 
-static bool isPromotableZeroStoreInst(MachineInstr *MI) {
+static bool isPromotableZeroStoreInst(MachineInstr &MI) {
   return (isPromotableZeroStoreOpcode(MI)) &&
          getLdStRegOp(MI).getReg() == AArch64::WZR;
 }
@@ -642,7 +642,7 @@
   unsigned Opc = I->getOpcode();
   bool IsScaled = !TII->isUnscaledLdSt(Opc);
-  int OffsetStride = IsScaled ? 1 : getMemScale(I);
+  int OffsetStride = IsScaled ? 1 : getMemScale(*I);
   bool MergeForward = Flags.getMergeForward();
 
   // Insert our new paired instruction after whichever of the paired
@@ -651,20 +651,20 @@
   // Also based on MergeForward is from where we copy the base register operand
   // so we get the flags compatible with the input code.
   const MachineOperand &BaseRegOp =
-      MergeForward ? getLdStBaseOp(MergeMI) : getLdStBaseOp(I);
+      MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);
 
   // Which register is Rt and which is Rt2 depends on the offset order.
   MachineInstr *RtMI, *Rt2MI;
-  if (getLdStOffsetOp(I).getImm() ==
-      getLdStOffsetOp(MergeMI).getImm() + OffsetStride) {
-    RtMI = MergeMI;
-    Rt2MI = I;
+  if (getLdStOffsetOp(*I).getImm() ==
+      getLdStOffsetOp(*MergeMI).getImm() + OffsetStride) {
+    RtMI = &*MergeMI;
+    Rt2MI = &*I;
   } else {
-    RtMI = I;
-    Rt2MI = MergeMI;
+    RtMI = &*I;
+    Rt2MI = &*MergeMI;
   }
 
-  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
+  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
   // Change the scaled offset from small to large type.
   if (IsScaled) {
     assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
@@ -674,7 +674,7 @@
   DebugLoc DL = I->getDebugLoc();
   MachineBasicBlock *MBB = I->getParent();
   if (isNarrowLoad(Opc)) {
-    MachineInstr *RtNewDest = MergeForward ? I : MergeMI;
+    MachineInstr *RtNewDest = &*(MergeForward ? I : MergeMI);
     // When merging small (< 32 bit) loads for big-endian targets, the order of
     // the component parts gets swapped.
     if (!Subtarget->isLittleEndian())
@@ -683,7 +683,7 @@
     MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
     NewMemMI =
         BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
-            .addOperand(getLdStRegOp(RtNewDest))
+            .addOperand(getLdStRegOp(*RtNewDest))
             .addOperand(BaseRegOp)
             .addImm(OffsetImm)
             .setMemRefs(I->mergeMemRefsWith(*MergeMI));
@@ -698,32 +698,32 @@
     DEBUG(dbgs() << " with instructions:\n ");
    DEBUG((NewMemMI)->print(dbgs()));
 
-    int Width = getMemScale(I) == 1 ? 8 : 16;
+    int Width = getMemScale(*I) == 1 ? 8 : 16;
     int LSBLow = 0;
     int LSBHigh = Width;
     int ImmsLow = LSBLow + Width - 1;
     int ImmsHigh = LSBHigh + Width - 1;
-    MachineInstr *ExtDestMI = MergeForward ? MergeMI : I;
+    MachineInstr *ExtDestMI = &*(MergeForward ? MergeMI : I);
     if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
       // Create the bitfield extract for high bits.
       BitExtMI1 =
-          BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(Rt2MI)))
-              .addOperand(getLdStRegOp(Rt2MI))
-              .addReg(getLdStRegOp(RtNewDest).getReg())
+          BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*Rt2MI)))
+              .addOperand(getLdStRegOp(*Rt2MI))
+              .addReg(getLdStRegOp(*RtNewDest).getReg())
              .addImm(LSBHigh)
              .addImm(ImmsHigh);
       // Create the bitfield extract for low bits.
       if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
         // For unsigned, prefer to use AND for low bits.
         BitExtMI2 =
             BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
-                .addOperand(getLdStRegOp(RtMI))
-                .addReg(getLdStRegOp(RtNewDest).getReg())
+                .addOperand(getLdStRegOp(*RtMI))
+                .addReg(getLdStRegOp(*RtNewDest).getReg())
                 .addImm(ImmsLow);
       } else {
         BitExtMI2 =
-            BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(RtMI)))
-                .addOperand(getLdStRegOp(RtMI))
-                .addReg(getLdStRegOp(RtNewDest).getReg())
+            BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*RtMI)))
+                .addOperand(getLdStRegOp(*RtMI))
+                .addReg(getLdStRegOp(*RtNewDest).getReg())
                 .addImm(LSBLow)
                 .addImm(ImmsLow);
       }
@@ -732,23 +732,23 @@
       if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
         // For unsigned, prefer to use AND for low bits.
         BitExtMI1 = BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
-                        .addOperand(getLdStRegOp(RtMI))
-                        .addReg(getLdStRegOp(RtNewDest).getReg())
+                        .addOperand(getLdStRegOp(*RtMI))
+                        .addReg(getLdStRegOp(*RtNewDest).getReg())
                         .addImm(ImmsLow);
       } else {
         BitExtMI1 =
-            BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(RtMI)))
-                .addOperand(getLdStRegOp(RtMI))
-                .addReg(getLdStRegOp(RtNewDest).getReg())
+            BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*RtMI)))
+                .addOperand(getLdStRegOp(*RtMI))
+                .addReg(getLdStRegOp(*RtNewDest).getReg())
                 .addImm(LSBLow)
                 .addImm(ImmsLow);
       }
 
       // Create the bitfield extract for high bits.
       BitExtMI2 =
-          BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(Rt2MI)))
-              .addOperand(getLdStRegOp(Rt2MI))
-              .addReg(getLdStRegOp(RtNewDest).getReg())
+          BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*Rt2MI)))
+              .addOperand(getLdStRegOp(*Rt2MI))
+              .addReg(getLdStRegOp(*RtNewDest).getReg())
              .addImm(LSBHigh)
              .addImm(ImmsHigh);
     }
@@ -766,7 +766,7 @@
     MergeMI->eraseFromParent();
     return NextI;
   }
-  assert(isPromotableZeroStoreInst(I) && isPromotableZeroStoreInst(MergeMI) &&
+  assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
         "Expected promotable zero store");
 
   // Construct the new instruction.
@@ -809,7 +809,7 @@
   unsigned Opc =
       SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
   bool IsUnscaled = TII->isUnscaledLdSt(Opc);
-  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;
+  int OffsetStride = IsUnscaled ? getMemScale(*I) : 1;
 
   bool MergeForward = Flags.getMergeForward();
 
   // Insert our new paired instruction after whichever of the paired
@@ -818,20 +818,20 @@
   // Also based on MergeForward is from where we copy the base register operand
   // so we get the flags compatible with the input code.
   const MachineOperand &BaseRegOp =
-      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);
+      MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);
 
-  int Offset = getLdStOffsetOp(I).getImm();
-  int PairedOffset = getLdStOffsetOp(Paired).getImm();
+  int Offset = getLdStOffsetOp(*I).getImm();
+  int PairedOffset = getLdStOffsetOp(*Paired).getImm();
   bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
   if (IsUnscaled != PairedIsUnscaled) {
     // We're trying to pair instructions that differ in how they are scaled. If
     // I is scaled then scale the offset of Paired accordingly. Otherwise, do
     // the opposite (i.e., make Paired's offset unscaled).
-    int MemSize = getMemScale(Paired);
+    int MemSize = getMemScale(*Paired);
     if (PairedIsUnscaled) {
       // If the unscaled offset isn't a multiple of the MemSize, we can't
       // pair the operations together.
-      assert(!(PairedOffset % getMemScale(Paired)) &&
+      assert(!(PairedOffset % getMemScale(*Paired)) &&
             "Offset should be a multiple of the stride!");
       PairedOffset /= MemSize;
     } else {
@@ -842,23 +842,23 @@
   // Which register is Rt and which is Rt2 depends on the offset order.
   MachineInstr *RtMI, *Rt2MI;
   if (Offset == PairedOffset + OffsetStride) {
-    RtMI = Paired;
-    Rt2MI = I;
+    RtMI = &*Paired;
+    Rt2MI = &*I;
     // Here we swapped the assumption made for SExtIdx.
     // I.e., we turn ldp I, Paired into ldp Paired, I.
     // Update the index accordingly.
     if (SExtIdx != -1)
       SExtIdx = (SExtIdx + 1) % 2;
   } else {
-    RtMI = I;
-    Rt2MI = Paired;
+    RtMI = &*I;
+    Rt2MI = &*Paired;
   }
-  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
+  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
   // Scale the immediate offset, if necessary.
   if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
-    assert(!(OffsetImm % getMemScale(RtMI)) &&
+    assert(!(OffsetImm % getMemScale(*RtMI)) &&
           "Unscaled offset cannot be scaled.");
-    OffsetImm /= getMemScale(RtMI);
+    OffsetImm /= getMemScale(*RtMI);
   }
 
   // Construct the new instruction.
@@ -866,8 +866,8 @@
   MachineInstrBuilder MIB;
   DebugLoc DL = I->getDebugLoc();
   MachineBasicBlock *MBB = I->getParent();
   MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
-            .addOperand(getLdStRegOp(RtMI))
-            .addOperand(getLdStRegOp(Rt2MI))
+            .addOperand(getLdStRegOp(*RtMI))
+            .addOperand(getLdStRegOp(*Rt2MI))
             .addOperand(BaseRegOp)
             .addImm(OffsetImm)
             .setMemRefs(I->mergeMemRefsWith(*Paired));
@@ -930,10 +930,10 @@
   MachineBasicBlock::iterator NextI = LoadI;
   ++NextI;
 
-  int LoadSize = getMemScale(LoadI);
-  int StoreSize = getMemScale(StoreI);
-  unsigned LdRt = getLdStRegOp(LoadI).getReg();
-  unsigned StRt = getLdStRegOp(StoreI).getReg();
+  int LoadSize = getMemScale(*LoadI);
+  int StoreSize = getMemScale(*StoreI);
+  unsigned LdRt = getLdStRegOp(*LoadI).getReg();
+  unsigned StRt = getLdStRegOp(*StoreI).getReg();
   bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);
 
   assert((IsStoreXReg ||
@@ -968,11 +968,11 @@
           "Unsupported ld/st match");
     assert(LoadSize <= StoreSize && "Invalid load size");
     int UnscaledLdOffset = IsUnscaled
-                               ? getLdStOffsetOp(LoadI).getImm()
-                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
+                               ? getLdStOffsetOp(*LoadI).getImm()
+                               : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
     int UnscaledStOffset = IsUnscaled
-                               ? getLdStOffsetOp(StoreI).getImm()
-                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
+                               ? getLdStOffsetOp(*StoreI).getImm()
+                               : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
     int Width = LoadSize * 8;
     int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
     int Imms = Immr + Width - 1;
@@ -1028,10 +1028,10 @@
 
 /// trackRegDefsUses - Remember what registers the specified instruction uses
 /// and modifies.
-static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
+static void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
                              BitVector &UsedRegs,
                              const TargetRegisterInfo *TRI) {
-  for (const MachineOperand &MO : MI->operands()) {
+  for (const MachineOperand &MO : MI.operands()) {
     if (MO.isRegMask())
       ModifiedRegs.setBitsNotInMask(MO.getRegMask());
 
@@ -1100,7 +1100,7 @@
                                            MachineBasicBlock::iterator &StoreI) {
   MachineBasicBlock::iterator B = I->getParent()->begin();
   MachineBasicBlock::iterator MBBI = I;
-  MachineInstr *LoadMI = I;
+  MachineInstr &LoadMI = *I;
   unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();
 
   // If the load is the first instruction in the block, there's obviously
@@ -1116,17 +1116,17 @@
   unsigned Count = 0;
   do {
     --MBBI;
-    MachineInstr *MI = MBBI;
+    MachineInstr &MI = *MBBI;
 
     // Don't count DBG_VALUE instructions towards the search limit.
-    if (!MI->isDebugValue())
+    if (!MI.isDebugValue())
       ++Count;
 
     // If the load instruction reads directly from the address to which the
     // store instruction writes and the stored value is not modified, we can
     // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
-    if (MI->mayStore() && isMatchingStore(LoadMI, MI) &&
+    if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
         BaseReg == getLdStBaseOp(MI).getReg() &&
         isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
         !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
       StoreI = MBBI;
       return true;
     }
 
-    if (MI->isCall())
+    if (MI.isCall())
       return false;
 
     // Update modified / uses register lists.
@@ -1146,7 +1146,7 @@
       return false;
 
     // If we encounter a store aliased with the load, return early.
-    if (MI->mayStore() && mayAlias(*LoadMI, *MI, TII))
+    if (MI.mayStore() && mayAlias(LoadMI, MI, TII))
       return false;
   } while (MBBI != B && Count < Limit);
   return false;
@@ -1154,20 +1154,20 @@
 
 // Returns true if FirstMI and MI are candidates for merging or pairing.
 // Otherwise, returns false.
-static bool areCandidatesToMergeOrPair(MachineInstr *FirstMI, MachineInstr *MI,
+static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
                                        LdStPairFlags &Flags,
                                        const AArch64InstrInfo *TII) {
   // If this is volatile or if pairing is suppressed, not a candidate.
-  if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(*MI))
+  if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
     return false;
 
   // We should have already checked FirstMI for pair suppression and volatility.
-  assert(!FirstMI->hasOrderedMemoryRef() &&
-         !TII->isLdStPairSuppressed(*FirstMI) &&
+  assert(!FirstMI.hasOrderedMemoryRef() &&
+         !TII->isLdStPairSuppressed(FirstMI) &&
          "FirstMI shouldn't get here if either of these checks are true.");
 
-  unsigned OpcA = FirstMI->getOpcode();
-  unsigned OpcB = MI->getOpcode();
+  unsigned OpcA = FirstMI.getOpcode();
+  unsigned OpcB = MI.getOpcode();
 
   // Opcodes match: nothing more to check.
   if (OpcA == OpcB)
@@ -1208,11 +1208,11 @@
                                                       bool FindNarrowMerge) {
   MachineBasicBlock::iterator E = I->getParent()->end();
   MachineBasicBlock::iterator MBBI = I;
-  MachineInstr *FirstMI = I;
+  MachineInstr &FirstMI = *I;
   ++MBBI;
 
-  bool MayLoad = FirstMI->mayLoad();
-  bool IsUnscaled = TII->isUnscaledLdSt(*FirstMI);
+  bool MayLoad = FirstMI.mayLoad();
+  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
   unsigned Reg = getLdStRegOp(FirstMI).getReg();
   unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
   int Offset = getLdStOffsetOp(FirstMI).getImm();
@@ -1228,10 +1228,10 @@
   SmallVector<MachineInstr *, 4> MemInsns;
   for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
-    MachineInstr *MI = MBBI;
+    MachineInstr &MI = *MBBI;
     // Skip DBG_VALUE instructions. Otherwise debug info can affect the
     // optimization by changing how far we scan.
-    if (MI->isDebugValue())
+    if (MI.isDebugValue())
       continue;
 
     // Now that we know this is a real instruction, count it.
@@ -1240,7 +1240,7 @@
     Flags.setSExtIdx(-1);
     if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
         getLdStOffsetOp(MI).isImm()) {
-      assert(MI->mayLoadOrStore() && "Expected memory operation.");
+      assert(MI.mayLoadOrStore() && "Expected memory operation.");
       // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
@@ -1249,7 +1249,7 @@
      // a relocation.
       unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
       int MIOffset = getLdStOffsetOp(MI).getImm();
-      bool MIIsUnscaled = TII->isUnscaledLdSt(*MI);
+      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
       if (IsUnscaled != MIIsUnscaled) {
         // We're trying to pair instructions that differ in how they are scaled.
        // If FirstMI is scaled then scale the offset of MI accordingly.
@@ -1277,7 +1277,7 @@
          if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
              (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
-            MemInsns.push_back(MI);
+            MemInsns.push_back(&MI);
            continue;
          }
        } else {
@@ -1287,7 +1287,7 @@
          // a pairwise instruction, bail and keep looking.
          if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
-            MemInsns.push_back(MI);
+            MemInsns.push_back(&MI);
            continue;
          }
          // If the alignment requirements of the paired (scaled) instruction
@@ -1295,7 +1295,7 @@
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
-            MemInsns.push_back(MI);
+            MemInsns.push_back(&MI);
            continue;
          }
        }
@@ -1304,7 +1304,7 @@
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
-          MemInsns.push_back(MI);
+          MemInsns.push_back(&MI);
          continue;
        }
 
@@ -1313,8 +1313,8 @@
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
-            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
-            !mayAlias(*MI, MemInsns, TII)) {
+            !(MI.mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
+            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }
@@ -1325,7 +1325,7 @@
        // into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
-            !mayAlias(*FirstMI, MemInsns, TII)) {
+            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
@@ -1336,7 +1336,7 @@
 
     // If the instruction wasn't a matching load or store. Stop searching if we
     // encounter a call instruction that might modify memory.
-    if (MI->isCall())
+    if (MI.isCall())
       return E;
 
     // Update modified / uses register lists.
@@ -1348,8 +1348,8 @@
       return E;
 
     // Update list of instructions that read/write memory.
-    if (MI->mayLoadOrStore())
-      MemInsns.push_back(MI);
+    if (MI.mayLoadOrStore())
+      MemInsns.push_back(&MI);
   }
   return E;
 }
@@ -1377,22 +1377,22 @@
   unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                              : getPostIndexedOpcode(I->getOpcode());
   MachineInstrBuilder MIB;
-  if (!isPairedLdSt(I)) {
+  if (!isPairedLdSt(*I)) {
     // Non-paired instruction.
     MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
-              .addOperand(getLdStRegOp(Update))
-              .addOperand(getLdStRegOp(I))
-              .addOperand(getLdStBaseOp(I))
+              .addOperand(getLdStRegOp(*Update))
+              .addOperand(getLdStRegOp(*I))
+              .addOperand(getLdStBaseOp(*I))
               .addImm(Value)
               .setMemRefs(I->memoperands_begin(), I->memoperands_end());
   } else {
     // Paired instruction.
-    int Scale = getMemScale(I);
+    int Scale = getMemScale(*I);
     MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
-              .addOperand(getLdStRegOp(Update))
-              .addOperand(getLdStRegOp(I, 0))
-              .addOperand(getLdStRegOp(I, 1))
-              .addOperand(getLdStBaseOp(I))
+              .addOperand(getLdStRegOp(*Update))
+              .addOperand(getLdStRegOp(*I, 0))
+              .addOperand(getLdStRegOp(*I, 1))
+              .addOperand(getLdStBaseOp(*I))
               .addImm(Value / Scale)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
   }
@@ -1417,10 +1417,10 @@
   return NextI;
 }
 
-bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
-                                               MachineInstr *MI,
+bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
+                                               MachineInstr &MI,
                                                unsigned BaseReg, int Offset) {
-  switch (MI->getOpcode()) {
+  switch (MI.getOpcode()) {
   default:
     break;
   case AArch64::SUBXri:
@@ -1430,20 +1430,20 @@
   case AArch64::ADDXri:
     // Make sure it's a vanilla immediate operand, not a relocation or
     // anything else we can't handle.
-    if (!MI->getOperand(2).isImm())
+    if (!MI.getOperand(2).isImm())
       break;
     // Watch out for 1 << 12 shifted value.
-    if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
+    if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
       break;
 
     // The update instruction source and destination register must be the
     // same as the load/store base register.
-    if (MI->getOperand(0).getReg() != BaseReg ||
-        MI->getOperand(1).getReg() != BaseReg)
+    if (MI.getOperand(0).getReg() != BaseReg ||
+        MI.getOperand(1).getReg() != BaseReg)
       break;
 
     bool IsPairedInsn = isPairedLdSt(MemMI);
-    int UpdateOffset = MI->getOperand(2).getImm();
+    int UpdateOffset = MI.getOperand(2).getImm();
     // For non-paired load/store instructions, the immediate must fit in a
     // signed 9-bit integer.
     if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
@@ -1464,7 +1464,7 @@
 
     // If we have a non-zero Offset, we check that it matches the amount
     // we're adding to the register.
-    if (!Offset || Offset == MI->getOperand(2).getImm())
+    if (!Offset || Offset == MI.getOperand(2).getImm())
      return true;
    break;
   }
@@ -1474,7 +1474,7 @@
 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
     MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
   MachineBasicBlock::iterator E = I->getParent()->end();
-  MachineInstr *MemMI = I;
+  MachineInstr &MemMI = *I;
   MachineBasicBlock::iterator MBBI = I;
 
   unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
@@ -1501,16 +1501,16 @@
   UsedRegs.reset();
   ++MBBI;
   for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
-    MachineInstr *MI = MBBI;
+    MachineInstr &MI = *MBBI;
 
     // Skip DBG_VALUE instructions.
-    if (MI->isDebugValue())
+    if (MI.isDebugValue())
      continue;
 
     // Now that we know this is a real instruction, count it.
     ++Count;
 
     // If we found a match, return it.
-    if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
+    if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
      return MBBI;
 
     // Update the status of what the instruction clobbered and used.
@@ -1528,7 +1528,7 @@
     MachineBasicBlock::iterator I, unsigned Limit) {
   MachineBasicBlock::iterator B = I->getParent()->begin();
   MachineBasicBlock::iterator E = I->getParent()->end();
-  MachineInstr *MemMI = I;
+  MachineInstr &MemMI = *I;
   MachineBasicBlock::iterator MBBI = I;
 
   unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
@@ -1554,14 +1554,14 @@
   unsigned Count = 0;
   do {
     --MBBI;
-    MachineInstr *MI = MBBI;
+    MachineInstr &MI = *MBBI;
 
     // Don't count DBG_VALUE instructions towards the search limit.
-    if (!MI->isDebugValue())
+    if (!MI.isDebugValue())
      ++Count;
 
     // If we found a match, return it.
-    if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
+    if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
      return MBBI;
 
     // Update the status of what the instruction clobbered and used.
@@ -1577,9 +1577,9 @@
 
 bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
     MachineBasicBlock::iterator &MBBI) {
-  MachineInstr *MI = MBBI;
+  MachineInstr &MI = *MBBI;
   // If this is a volatile load, don't mess with it.
-  if (MI->hasOrderedMemoryRef())
+  if (MI.hasOrderedMemoryRef())
    return false;
 
   // Make sure this is a reg+imm.
@@ -1605,12 +1605,12 @@
 // store.
 bool AArch64LoadStoreOpt::tryToMergeLdStInst(
     MachineBasicBlock::iterator &MBBI) {
-  assert((isNarrowLoad(MBBI) || isPromotableZeroStoreOpcode(MBBI)) &&
+  assert((isNarrowLoad(*MBBI) || isPromotableZeroStoreOpcode(*MBBI)) &&
         "Expected narrow op.");
-  MachineInstr *MI = MBBI;
-  MachineBasicBlock::iterator E = MI->getParent()->end();
+  MachineInstr &MI = *MBBI;
+  MachineBasicBlock::iterator E = MI.getParent()->end();
 
-  if (!TII->isCandidateToMergeOrPair(*MI))
+  if (!TII->isCandidateToMergeOrPair(MI))
    return false;
 
   // For promotable zero stores, the stored value should be WZR.
@@ -1639,16 +1639,16 @@
 // Find loads and stores that can be merged into a single load or store pair
 // instruction.
 bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
-  MachineInstr *MI = MBBI;
-  MachineBasicBlock::iterator E = MI->getParent()->end();
+  MachineInstr &MI = *MBBI;
+  MachineBasicBlock::iterator E = MI.getParent()->end();
 
-  if (!TII->isCandidateToMergeOrPair(*MI))
+  if (!TII->isCandidateToMergeOrPair(MI))
    return false;
 
   // Early exit if the offset is not possible to match. (6 bits of positive
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
-  bool IsUnscaled = TII->isUnscaledLdSt(*MI);
+  bool IsUnscaled = TII->isUnscaledLdSt(MI);
   int Offset = getLdStOffsetOp(MI).getImm();
   int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
   if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
@@ -1660,7 +1660,7 @@
       findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
   if (Paired != E) {
     ++NumPairCreated;
-    if (TII->isUnscaledLdSt(*MI))
+    if (TII->isUnscaledLdSt(MI))
       ++NumUnscaledPairCreated;
     // Keeping the iterator straight is a pain, so we let the merge routine tell
     // us what the next instruction is after it's done mucking about.
@@ -1685,8 +1685,8 @@
   //        lsr w2, w1, #16
   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
        MBBI != E;) {
-    MachineInstr *MI = MBBI;
-    switch (MI->getOpcode()) {
+    MachineInstr &MI = *MBBI;
+    switch (MI.getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
@@ -1728,8 +1728,8 @@
   //        str wzr, [x0]
   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
        enableNarrowLdOpt && MBBI != E;) {
-    MachineInstr *MI = MBBI;
-    unsigned Opc = MI->getOpcode();
+    MachineInstr &MI = *MBBI;
+    unsigned Opc = MI.getOpcode();
    if (isPromotableZeroStoreOpcode(Opc) ||
        (EnableNarrowLdMerge && isNarrowLoad(Opc))) {
      if (tryToMergeLdStInst(MBBI)) {
@@ -1749,8 +1749,8 @@
   //        ldp x0, x1, [x2]
   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
        MBBI != E;) {
-    MachineInstr *MI = MBBI;
-    switch (MI->getOpcode()) {
+    MachineInstr &MI = *MBBI;
+    switch (MI.getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
@@ -1797,10 +1797,10 @@
   //        ldr x0, [x2], #4
   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
        MBBI != E;) {
-    MachineInstr *MI = MBBI;
+    MachineInstr &MI = *MBBI;
    // Do update merging. It's simpler to keep this separate from the above
    // switchs, though not strictly necessary.
-    unsigned Opc = MI->getOpcode();
+    unsigned Opc = MI.getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.