Index: lib/Target/ARM/A15SDOptimizer.cpp
===================================================================
--- lib/Target/ARM/A15SDOptimizer.cpp
+++ lib/Target/ARM/A15SDOptimizer.cpp
@@ -427,13 +427,11 @@
                                           unsigned Lane, bool QPR) {
   unsigned Out = MRI->createVirtualRegister(QPR ? &ARM::QPRRegClass :
                                                   &ARM::DPRRegClass);
-  AddDefaultPred(BuildMI(MBB,
-                         InsertBefore,
-                         DL,
-                         TII->get(QPR ? ARM::VDUPLN32q : ARM::VDUPLN32d),
-                         Out)
-                   .addReg(Reg)
-                   .addImm(Lane));
+  BuildMI(MBB, InsertBefore, DL,
+          TII->get(QPR ? ARM::VDUPLN32q : ARM::VDUPLN32d), Out)
+      .addReg(Reg)
+      .addImm(Lane)
+      .add(predOps(ARMCC::AL));
   return Out;
 }
 
@@ -476,13 +474,11 @@
                                           const DebugLoc &DL,
                                           unsigned Ssub0, unsigned Ssub1) {
   unsigned Out = MRI->createVirtualRegister(&ARM::DPRRegClass);
-  AddDefaultPred(BuildMI(MBB,
-                         InsertBefore,
-                         DL,
-                         TII->get(ARM::VEXTd32), Out)
-                   .addReg(Ssub0)
-                   .addReg(Ssub1)
-                   .addImm(1));
+  BuildMI(MBB, InsertBefore, DL, TII->get(ARM::VEXTd32), Out)
+      .addReg(Ssub0)
+      .addReg(Ssub1)
+      .addImm(1)
+      .add(predOps(ARMCC::AL));
   return Out;
 }
 
Index: lib/Target/ARM/ARMBaseInstrInfo.h
===================================================================
--- lib/Target/ARM/ARMBaseInstrInfo.h
+++ lib/Target/ARM/ARMBaseInstrInfo.h
@@ -418,12 +418,6 @@
 
 // FIXME: Remove when all uses are gone.
 static inline
-const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
-  return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
-}
-
-// FIXME: Remove when all uses are gone.
-static inline
 const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
   return MIB.addReg(0);
 }
Index: lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -698,9 +698,8 @@
   if (Subtarget.isMClass())
     MIB.addImm(0x800);
 
-  AddDefaultPred(MIB);
-
-  MIB.addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
+  MIB.add(predOps(ARMCC::AL))
+     .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
 }
 
 void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
@@ -718,11 +717,9 @@
   else
     MIB.addImm(8);
 
-  MIB.addReg(SrcReg, getKillRegState(KillSrc));
-
-  AddDefaultPred(MIB);
-
-  MIB.addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
+  MIB.addReg(SrcReg, getKillRegState(KillSrc))
+     .add(predOps(ARMCC::AL))
+     .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
 }
 
 void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
@@ -733,8 +730,9 @@
   bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
 
   if (GPRDest && GPRSrc) {
-    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
-                                  .addReg(SrcReg, getKillRegState(KillSrc))));
+    AddDefaultCC(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
+                     .addReg(SrcReg, getKillRegState(KillSrc))
+                     .add(predOps(ARMCC::AL)));
     return;
   }
 
@@ -758,7 +756,7 @@
     MIB.addReg(SrcReg, getKillRegState(KillSrc));
     if (Opc == ARM::VORRq)
       MIB.addReg(SrcReg, getKillRegState(KillSrc));
-    AddDefaultPred(MIB);
+    MIB.add(predOps(ARMCC::AL));
     return;
   }
 
@@ -845,7 +843,7 @@
     // VORR takes two source operands.
     if (Opc == ARM::VORRq)
       Mov.addReg(Src);
-    Mov = AddDefaultPred(Mov);
+    Mov = Mov.add(predOps(ARMCC::AL));
     // MOVr can set CC.
     if (Opc == ARM::MOVr)
       Mov = AddDefaultCC(Mov);
@@ -886,35 +884,44 @@
   switch (RC->getSize()) {
     case 4:
       if (ARM::GPRRegClass.hasSubClassEq(RC)) {
-        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
-                   .addReg(SrcReg, getKillRegState(isKill))
-                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+        BuildMI(MBB, I, DL, get(ARM::STRi12))
+            .addReg(SrcReg, getKillRegState(isKill))
+            .addFrameIndex(FI)
+            .addImm(0)
+            .addMemOperand(MMO)
+            .add(predOps(ARMCC::AL));
       } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
-        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
-                   .addReg(SrcReg, getKillRegState(isKill))
-                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+        BuildMI(MBB, I, DL, get(ARM::VSTRS))
+            .addReg(SrcReg, getKillRegState(isKill))
+            .addFrameIndex(FI)
+            .addImm(0)
+            .addMemOperand(MMO)
+            .add(predOps(ARMCC::AL));
       } else
         llvm_unreachable("Unknown reg class!");
       break;
    case 8:
      if (ARM::DPRRegClass.hasSubClassEq(RC)) {
-        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
-                   .addReg(SrcReg, getKillRegState(isKill))
-                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+        BuildMI(MBB, I, DL, get(ARM::VSTRD))
+            .addReg(SrcReg, getKillRegState(isKill))
+            .addFrameIndex(FI)
+            .addImm(0)
+            .addMemOperand(MMO)
+            .add(predOps(ARMCC::AL));
      } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
        if (Subtarget.hasV5TEOps()) {
          MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::STRD));
          AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
          AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
-         MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO);
-
-         AddDefaultPred(MIB);
+         MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
+            .add(predOps(ARMCC::AL));
        } else {
          // Fallback to STM instruction, which has existed since the dawn of
          // time.
-         MachineInstrBuilder MIB =
-           AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STMIA))
-                            .addFrameIndex(FI).addMemOperand(MMO));
+         MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::STMIA))
+                                       .addFrameIndex(FI)
+                                       .addMemOperand(MMO)
+                                       .add(predOps(ARMCC::AL));
          AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
          AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
        }
@@ -925,15 +932,18 @@
     if (ARM::DPairRegClass.hasSubClassEq(RC)) {
       // Use aligned spills if the stack can be realigned.
       if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
-        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
-                   .addFrameIndex(FI).addImm(16)
-                   .addReg(SrcReg, getKillRegState(isKill))
-                   .addMemOperand(MMO));
+        BuildMI(MBB, I, DL, get(ARM::VST1q64))
+            .addFrameIndex(FI)
+            .addImm(16)
+            .addReg(SrcReg, getKillRegState(isKill))
+            .addMemOperand(MMO)
+            .add(predOps(ARMCC::AL));
       } else {
-        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
-                   .addReg(SrcReg, getKillRegState(isKill))
-                   .addFrameIndex(FI)
-                   .addMemOperand(MMO));
+        BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
+            .addReg(SrcReg, getKillRegState(isKill))
+            .addFrameIndex(FI)
+            .addMemOperand(MMO)
+            .add(predOps(ARMCC::AL));
       }
     } else
       llvm_unreachable("Unknown reg class!");
@@ -942,15 +952,17 @@
     if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
       // Use aligned spills if the stack can be realigned.
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64TPseudo)) - .addFrameIndex(FI).addImm(16) - .addReg(SrcReg, getKillRegState(isKill)) - .addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VST1d64TPseudo)) + .addFrameIndex(FI) + .addImm(16) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) - .addFrameIndex(FI)) - .addMemOperand(MMO); + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) + .addFrameIndex(FI) + .add(predOps(ARMCC::AL)) + .addMemOperand(MMO); MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); @@ -963,15 +975,17 @@ if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { // FIXME: It's possible to only store part of the QQ register if the // spilled def has a sub-register index. - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo)) - .addFrameIndex(FI).addImm(16) - .addReg(SrcReg, getKillRegState(isKill)) - .addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo)) + .addFrameIndex(FI) + .addImm(16) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) - .addFrameIndex(FI)) - .addMemOperand(MMO); + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) + .addFrameIndex(FI) + .add(predOps(ARMCC::AL)) + .addMemOperand(MMO); MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); @@ -982,10 +996,10 @@ break; case 64: if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) - .addFrameIndex(FI)) - .addMemOperand(MMO); + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) + .addFrameIndex(FI) + .add(predOps(ARMCC::AL)) + .addMemOperand(MMO); MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); @@ -1068,19 +1082,28 @@ switch (RC->getSize()) { case 4: if (ARM::GPRRegClass.hasSubClassEq(RC)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg) - .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else if (ARM::SPRRegClass.hasSubClassEq(RC)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg) - .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else llvm_unreachable("Unknown reg class!"); break; case 8: if (ARM::DPRRegClass.hasSubClassEq(RC)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg) - .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) { MachineInstrBuilder MIB; @@ -1088,14 +1111,15 @@ MIB = BuildMI(MBB, I, DL, get(ARM::LDRD)); AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI); AddDReg(MIB, 
DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI); - MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO); - - AddDefaultPred(MIB); + MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else { // Fallback to LDM instruction, which has existed since the dawn of // time. - MIB = AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDMIA)) - .addFrameIndex(FI).addMemOperand(MMO)); + MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA)) + .addFrameIndex(FI) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI); MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI); } @@ -1108,13 +1132,16 @@ case 16: if (ARM::DPairRegClass.hasSubClassEq(RC)) { if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg) - .addFrameIndex(FI).addImm(16) - .addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg) + .addFrameIndex(FI) + .addImm(16) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg) - .addFrameIndex(FI) - .addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg) + .addFrameIndex(FI) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } } else llvm_unreachable("Unknown reg class!"); @@ -1122,14 +1149,16 @@ case 24: if (ARM::DTripleRegClass.hasSubClassEq(RC)) { if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg) - .addFrameIndex(FI).addImm(16) - .addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg) + .addFrameIndex(FI) + .addImm(16) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) - .addFrameIndex(FI) - .addMemOperand(MMO)); + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) + .addFrameIndex(FI) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI); @@ -1142,14 +1171,16 @@ case 32: if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) { if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg) - .addFrameIndex(FI).addImm(16) - .addMemOperand(MMO)); + BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg) + .addFrameIndex(FI) + .addImm(16) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) - .addFrameIndex(FI)) - .addMemOperand(MMO); + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) + .addFrameIndex(FI) + .add(predOps(ARMCC::AL)) + .addMemOperand(MMO); MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI); @@ -1162,10 +1193,10 @@ break; case 64: if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) - .addFrameIndex(FI)) - .addMemOperand(MMO); + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) + .addFrameIndex(FI) + .add(predOps(ARMCC::AL)) + .addMemOperand(MMO); MIB = 
AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI); @@ -1262,8 +1293,8 @@ STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA)); } - AddDefaultPred(LDM.addOperand(MI->getOperand(3))); - AddDefaultPred(STM.addOperand(MI->getOperand(2))); + LDM.addOperand(MI->getOperand(3)).add(predOps(ARMCC::AL)); + STM.addOperand(MI->getOperand(2)).add(predOps(ARMCC::AL)); // Sort the scratch registers into ascending order. const TargetRegisterInfo &TRI = getRegisterInfo(); @@ -1346,7 +1377,7 @@ MI.setDesc(get(ARM::VMOVD)); MI.getOperand(0).setReg(DstRegD); MI.getOperand(1).setReg(SrcRegD); - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); // We are now reading SrcRegD instead of SrcRegS. This may upset the // register scavenger and machine verifier, so we need to indicate that we @@ -2797,11 +2828,11 @@ unsigned Reg1 = UseMI.getOperand(OpIdx).getReg(); bool isKill = UseMI.getOperand(OpIdx).isKill(); unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg)); - AddDefaultCC( - AddDefaultPred(BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), - get(NewUseOpc), NewReg) - .addReg(Reg1, getKillRegState(isKill)) - .addImm(SOImmValV1))); + AddDefaultCC(BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), + get(NewUseOpc), NewReg) + .addReg(Reg1, getKillRegState(isKill)) + .addImm(SOImmValV1) + .add(predOps(ARMCC::AL))); UseMI.setDesc(get(NewUseOpc)); UseMI.getOperand(1).setReg(NewReg); UseMI.getOperand(1).setIsKill(); @@ -4180,14 +4211,14 @@ MachineMemOperand::MOInvariant; MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4); - MIB.addMemOperand(MMO); - AddDefaultPred(MIB); + MIB.addMemOperand(MMO).add(predOps(ARMCC::AL)); } MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); - MIB.addReg(Reg, RegState::Kill).addImm(0); - MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); - AddDefaultPred(MIB); + MIB.addReg(Reg, RegState::Kill) + .addImm(0) + .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()) + .add(predOps(ARMCC::AL)); } bool @@ -4345,8 +4376,10 @@ // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits) MI.setDesc(get(ARM::VORRd)); - AddDefaultPred( - MIB.addReg(DstReg, RegState::Define).addReg(SrcReg).addReg(SrcReg)); + MIB.addReg(DstReg, RegState::Define) + .addReg(SrcReg) + .addReg(SrcReg) + .add(predOps(ARMCC::AL)); break; case ARM::VMOVRS: if (Domain != ExeNEON) @@ -4366,9 +4399,10 @@ // Note that DSrc has been widened and the other lane may be undef, which // contaminates the entire register. MI.setDesc(get(ARM::VGETLNi32)); - AddDefaultPred(MIB.addReg(DstReg, RegState::Define) - .addReg(DReg, RegState::Undef) - .addImm(Lane)); + MIB.addReg(DstReg, RegState::Define) + .addReg(DReg, RegState::Undef) + .addImm(Lane) + .add(predOps(ARMCC::AL)); // The old source should be an implicit use, otherwise we might think it // was dead before here. @@ -4398,8 +4432,8 @@ MIB.addReg(DReg, RegState::Define) .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI))) .addReg(SrcReg) - .addImm(Lane); - AddDefaultPred(MIB); + .addImm(Lane) + .add(predOps(ARMCC::AL)); // The narrower destination must be marked as set to keep previous chains // in place. 
@@ -4433,8 +4467,8 @@ MI.setDesc(get(ARM::VDUPLN32d)); MIB.addReg(DDst, RegState::Define) .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI))) - .addImm(SrcLane); - AddDefaultPred(MIB); + .addImm(SrcLane) + .add(predOps(ARMCC::AL)); // Neither the source or the destination are naturally represented any // more, so add them in manually. @@ -4470,10 +4504,9 @@ CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst; CurUndef = !MI.readsRegister(CurReg, TRI); - NewMIB.addReg(CurReg, getUndefRegState(CurUndef)); - - NewMIB.addImm(1); - AddDefaultPred(NewMIB); + NewMIB.addReg(CurReg, getUndefRegState(CurUndef)) + .addImm(1) + .add(predOps(ARMCC::AL)); if (SrcLane == DstLane) NewMIB.addReg(SrcReg, RegState::Implicit); @@ -4489,10 +4522,9 @@ CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst; CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI); - MIB.addReg(CurReg, getUndefRegState(CurUndef)); - - MIB.addImm(1); - AddDefaultPred(MIB); + MIB.addReg(CurReg, getUndefRegState(CurUndef)) + .addImm(1) + .add(predOps(ARMCC::AL)); if (SrcLane != DstLane) MIB.addReg(SrcReg, RegState::Implicit); @@ -4613,9 +4645,9 @@ // Insert the dependency-breaking FCONSTD before MI. // 96 is the encoding of 0.5, but the actual value doesn't matter here. - AddDefaultPred( - BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg) - .addImm(96)); + BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg) + .addImm(96) + .add(predOps(ARMCC::AL)); MI.addRegisterKilled(DReg, TRI, true); } Index: lib/Target/ARM/ARMBaseRegisterInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -609,7 +609,7 @@ .addFrameIndex(FrameIdx).addImm(Offset); if (!AFI->isThumb1OnlyFunction()) - AddDefaultCC(AddDefaultPred(MIB)); + AddDefaultCC(MIB.add(predOps(ARMCC::AL))); } void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg, Index: lib/Target/ARM/ARMCallLowering.cpp =================================================================== --- lib/Target/ARM/ARMCallLowering.cpp +++ lib/Target/ARM/ARMCallLowering.cpp @@ -107,7 +107,7 @@ const Value *Val, unsigned VReg) const { assert(!Val == !VReg && "Return value without a vreg"); - auto Ret = AddDefaultPred(MIRBuilder.buildInstrNoInsert(ARM::BX_RET)); + auto Ret = MIRBuilder.buildInstrNoInsert(ARM::BX_RET).add(predOps(ARMCC::AL)); if (!lowerReturnVal(MIRBuilder, Val, VReg, Ret)) return false; Index: lib/Target/ARM/ARMExpandPseudoInsts.cpp =================================================================== --- lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -797,7 +797,7 @@ .addReg(Desired.getReg(), RegState::Kill); if (!IsThumb) MIB.addImm(0); - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); } // .Lloadcmp: @@ -814,12 +814,13 @@ MIB.addReg(Addr.getReg()); if (LdrexOp == ARM::t2LDREX) MIB.addImm(0); // a 32-bit Thumb ldrex (only) allows an offset. - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr; - AddDefaultPred(BuildMI(LoadCmpBB, DL, TII->get(CMPrr)) - .addReg(Dest.getReg(), getKillRegState(Dest.isDead())) - .addOperand(Desired)); + BuildMI(LoadCmpBB, DL, TII->get(CMPrr)) + .addReg(Dest.getReg(), getKillRegState(Dest.isDead())) + .addOperand(Desired) + .add(predOps(ARMCC::AL)); unsigned Bcc = IsThumb ? 
ARM::tBcc : ARM::Bcc; BuildMI(LoadCmpBB, DL, TII->get(Bcc)) .addMBB(DoneBB) @@ -842,12 +843,13 @@ MIB.addOperand(Addr); if (StrexOp == ARM::t2STREX) MIB.addImm(0); // a 32-bit Thumb strex (only) allows an offset. - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri; - AddDefaultPred(BuildMI(StoreBB, DL, TII->get(CMPri)) - .addReg(StatusReg, RegState::Kill) - .addImm(0)); + BuildMI(StoreBB, DL, TII->get(CMPri)) + .addReg(StatusReg, RegState::Kill) + .addImm(0) + .add(predOps(ARMCC::AL)); BuildMI(StoreBB, DL, TII->get(Bcc)) .addMBB(LoadCmpBB) .addImm(ARMCC::NE) @@ -927,13 +929,13 @@ MachineInstrBuilder MIB; MIB = BuildMI(LoadCmpBB, DL, TII->get(LDREXD)); addExclusiveRegPair(MIB, Dest, RegState::Define, IsThumb, TRI); - MIB.addReg(Addr.getReg()); - AddDefaultPred(MIB); + MIB.addReg(Addr.getReg()).add(predOps(ARMCC::AL)); unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr; - AddDefaultPred(BuildMI(LoadCmpBB, DL, TII->get(CMPrr)) - .addReg(DestLo, getKillRegState(Dest.isDead())) - .addReg(DesiredLo, getKillRegState(Desired.isDead()))); + BuildMI(LoadCmpBB, DL, TII->get(CMPrr)) + .addReg(DestLo, getKillRegState(Dest.isDead())) + .addReg(DesiredLo, getKillRegState(Desired.isDead())) + .add(predOps(ARMCC::AL)); BuildMI(LoadCmpBB, DL, TII->get(CMPrr)) .addReg(DestHi, getKillRegState(Dest.isDead())) @@ -959,13 +961,13 @@ unsigned STREXD = IsThumb ? ARM::t2STREXD : ARM::STREXD; MIB = BuildMI(StoreBB, DL, TII->get(STREXD), StatusReg); addExclusiveRegPair(MIB, New, 0, IsThumb, TRI); - MIB.addOperand(Addr); - AddDefaultPred(MIB); + MIB.addOperand(Addr).add(predOps(ARMCC::AL)); unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri; - AddDefaultPred(BuildMI(StoreBB, DL, TII->get(CMPri)) - .addReg(StatusReg, RegState::Kill) - .addImm(0)); + BuildMI(StoreBB, DL, TII->get(CMPri)) + .addReg(StatusReg, RegState::Kill) + .addImm(0) + .add(predOps(ARMCC::AL)); BuildMI(StoreBB, DL, TII->get(Bcc)) .addMBB(LoadCmpBB) .addImm(ARMCC::NE) @@ -1187,10 +1189,11 @@ "bits set."); unsigned bicOpc = AFI->isThumbFunction() ? ARM::t2BICri : ARM::BICri; - AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(bicOpc), ARM::R6) - .addReg(ARM::R6, RegState::Kill) - .addImm(MaxAlign-1))); + AddDefaultCC( + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(bicOpc), ARM::R6) + .addReg(ARM::R6, RegState::Kill) + .addImm(MaxAlign - 1) + .add(predOps(ARMCC::AL))); } } @@ -1201,24 +1204,25 @@ case ARM::MOVsrl_flag: case ARM::MOVsra_flag: { // These are just fancy MOVs instructions. - AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi), - MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(1)) - .addImm(ARM_AM::getSORegOpc((Opcode == ARM::MOVsrl_flag ? - ARM_AM::lsr : ARM_AM::asr), - 1))) - .addReg(ARM::CPSR, RegState::Define); + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi), + MI.getOperand(0).getReg()) + .addOperand(MI.getOperand(1)) + .addImm(ARM_AM::getSORegOpc( + (Opcode == ARM::MOVsrl_flag ? 
ARM_AM::lsr : ARM_AM::asr), 1)) + .add(predOps(ARMCC::AL)) + .addReg(ARM::CPSR, RegState::Define); MI.eraseFromParent(); return true; } case ARM::RRX: { // This encodes as "MOVs Rd, Rm, rrx MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),TII->get(ARM::MOVsi), - MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(1)) - .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0))) - .addReg(0); + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi), + MI.getOperand(0).getReg()) + .addOperand(MI.getOperand(1)) + .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0)) + .add(predOps(ARMCC::AL)) + .addReg(0); TransferImpOps(MI, MIB, MIB); MI.eraseFromParent(); return true; @@ -1248,9 +1252,9 @@ unsigned DstReg = MI.getOperand(0).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); MachineInstrBuilder MIB1 = - AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(NewLdOpc), DstReg) - .addOperand(MI.getOperand(1))); + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg) + .addOperand(MI.getOperand(1)) + .add(predOps(ARMCC::AL)); MIB1->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD)) @@ -1299,7 +1303,7 @@ .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, 4)); if (IsARM) MIB.addImm(0); - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); if (IsPIC) { MachineInstrBuilder MIB = @@ -1309,7 +1313,7 @@ .addImm(ARMPCLabelIndex); if (IsARM) - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); } MI.eraseFromParent(); @@ -1348,7 +1352,7 @@ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead)) .addReg(DstReg).addImm(LabelId); if (isARM) { - AddDefaultPred(MIB3); + MIB3.add(predOps(ARMCC::AL)); if (Opcode == ARM::MOV_ga_pcrel_ldr) MIB3->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); } Index: lib/Target/ARM/ARMFastISel.cpp =================================================================== --- lib/Target/ARM/ARMFastISel.cpp +++ lib/Target/ARM/ARMFastISel.cpp @@ -256,7 +256,7 @@ // Are we NEON in ARM mode and have a predicate operand? If so, I know // we're not predicable but add it anyways. if (isARMNEONPred(MI)) - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); // Do we optionally set a predicate? Preds is size > 0 iff the predicate // defines CPSR. All other OptionalDefines in ARM are the CCR register. @@ -2230,7 +2230,7 @@ DbgLoc, TII.get(CallOpc)); // BL / BLX don't take a predicate, but tBL / tBLX do. if (isThumb2) - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); if (Subtarget->genLongCalls()) MIB.addReg(CalleeReg); else @@ -2373,7 +2373,7 @@ // ARM calls don't take a predicate, but tBL / tBLX do. if(isThumb2) - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); if (UseReg) MIB.addReg(CalleeReg); else if (!IntrMemName) @@ -2687,7 +2687,9 @@ if (setsCPSR) MIB.addReg(ARM::CPSR, RegState::Define); SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); - AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); + MIB.addReg(SrcReg, isKill * RegState::Kill) + .addImm(ImmEnc) + .add(predOps(ARMCC::AL)); if (hasS) AddDefaultCC(MIB); // Second instruction consumes the first's result. @@ -2933,7 +2935,7 @@ .addConstantPoolIndex(Idx); if (Opc == ARM::LDRcp) MIB.addImm(0); - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); // Fix the address by adding pc. 
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); @@ -2944,7 +2946,7 @@ .addReg(TempReg) .addImm(ARMPCLabelIndex); if (!Subtarget->isThumb()) - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); if (UseGOT_PREL && Subtarget->isThumb()) { unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); Index: lib/Target/ARM/ARMFrameLowering.cpp =================================================================== --- lib/Target/ARM/ARMFrameLowering.cpp +++ lib/Target/ARM/ARMFrameLowering.cpp @@ -252,35 +252,37 @@ // lsr Reg, Reg, log2(Alignment) // lsl Reg, Reg, log2(Alignment) if (CanUseBFC) { - AddDefaultPred(BuildMI(MBB, MBBI, DL, TII.get(ARM::BFC), Reg) - .addReg(Reg, RegState::Kill) - .addImm(~AlignMask)); + BuildMI(MBB, MBBI, DL, TII.get(ARM::BFC), Reg) + .addReg(Reg, RegState::Kill) + .addImm(~AlignMask) + .add(predOps(ARMCC::AL)); } else if (AlignMask <= 255) { - AddDefaultCC( - AddDefaultPred(BuildMI(MBB, MBBI, DL, TII.get(ARM::BICri), Reg) - .addReg(Reg, RegState::Kill) - .addImm(AlignMask))); + AddDefaultCC(BuildMI(MBB, MBBI, DL, TII.get(ARM::BICri), Reg) + .addReg(Reg, RegState::Kill) + .addImm(AlignMask) + .add(predOps(ARMCC::AL))); } else { assert(!MustBeSingleInstruction && "Shouldn't call emitAligningInstructions demanding a single " "instruction to be emitted for large stack alignment for a target " "without BFC."); - AddDefaultCC(AddDefaultPred( - BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg) - .addReg(Reg, RegState::Kill) - .addImm(ARM_AM::getSORegOpc(ARM_AM::lsr, NrBitsToZero)))); - AddDefaultCC(AddDefaultPred( - BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg) - .addReg(Reg, RegState::Kill) - .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, NrBitsToZero)))); + AddDefaultCC(BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg) + .addReg(Reg, RegState::Kill) + .addImm(ARM_AM::getSORegOpc(ARM_AM::lsr, NrBitsToZero)) + .add(predOps(ARMCC::AL))); + AddDefaultCC(BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg) + .addReg(Reg, RegState::Kill) + .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, NrBitsToZero)) + .add(predOps(ARMCC::AL))); } } else { // Since this is only reached for Thumb-2 targets, the BFC instruction // should always be available. assert(CanUseBFC); - AddDefaultPred(BuildMI(MBB, MBBI, DL, TII.get(ARM::t2BFC), Reg) - .addReg(Reg, RegState::Kill) - .addImm(~AlignMask)); + BuildMI(MBB, MBBI, DL, TII.get(ARM::t2BFC), Reg) + .addReg(Reg, RegState::Kill) + .addImm(~AlignMask) + .add(predOps(ARMCC::AL)); } } @@ -448,9 +450,10 @@ uint32_t NumWords = NumBytes >> 2; if (NumWords < 65536) - AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), ARM::R4) - .addImm(NumWords) - .setMIFlags(MachineInstr::FrameSetup)); + BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), ARM::R4) + .addImm(NumWords) + .setMIFlags(MachineInstr::FrameSetup) + .add(predOps(ARMCC::AL)); else BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), ARM::R4) .addImm(NumWords) @@ -481,11 +484,11 @@ break; } - AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), - ARM::SP) - .addReg(ARM::SP, RegState::Kill) - .addReg(ARM::R4, RegState::Kill) - .setMIFlags(MachineInstr::FrameSetup))); + AddDefaultCC(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), ARM::SP) + .addReg(ARM::SP, RegState::Kill) + .addReg(ARM::R4, RegState::Kill) + .setMIFlags(MachineInstr::FrameSetup) + .add(predOps(ARMCC::AL))); NumBytes = 0; } @@ -657,12 +660,14 @@ // -- out lower bits in r4 // mov sp, r4 // FIXME: It will be better just to find spare register here. 
- AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4) - .addReg(ARM::SP, RegState::Kill)); + BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4) + .addReg(ARM::SP, RegState::Kill) + .add(predOps(ARMCC::AL)); emitAligningInstructions(MF, AFI, TII, MBB, MBBI, dl, ARM::R4, MaxAlign, false); - AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP) - .addReg(ARM::R4, RegState::Kill)); + BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP) + .addReg(ARM::R4, RegState::Kill) + .add(predOps(ARMCC::AL)); } AFI->setShouldRestoreSPFromFP(true); @@ -680,9 +685,9 @@ .addReg(ARM::SP) .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); else - AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), - RegInfo->getBaseRegister()) - .addReg(ARM::SP)); + BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), RegInfo->getBaseRegister()) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)); } // If the frame has variable sized objects then the epilogue must restore @@ -757,9 +762,9 @@ "No scratch register to restore SP from FP!"); emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes, ARMCC::AL, 0, TII); - AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), - ARM::SP) - .addReg(ARM::R4)); + BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP) + .addReg(ARM::R4) + .add(predOps(ARMCC::AL)); } } else { // Thumb2 or ARM. @@ -767,9 +772,9 @@ BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP) .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); else - AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), - ARM::SP) - .addReg(FramePtr)); + BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP) + .addReg(FramePtr) + .add(predOps(ARMCC::AL)); } } else if (NumBytes && !tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes)) @@ -936,18 +941,19 @@ }); if (Regs.size() > 1 || StrOpc== 0) { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(StmOpc), ARM::SP) - .addReg(ARM::SP).setMIFlags(MIFlags)); + MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StmOpc), ARM::SP) + .addReg(ARM::SP) + .setMIFlags(MIFlags) + .add(predOps(ARMCC::AL)); for (unsigned i = 0, e = Regs.size(); i < e; ++i) MIB.addReg(Regs[i].first, getKillRegState(Regs[i].second)); } else if (Regs.size() == 1) { - MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc), - ARM::SP) - .addReg(Regs[0].first, getKillRegState(Regs[0].second)) - .addReg(ARM::SP).setMIFlags(MIFlags) - .addImm(-4); - AddDefaultPred(MIB); + BuildMI(MBB, MI, DL, TII.get(StrOpc), ARM::SP) + .addReg(Regs[0].first, getKillRegState(Regs[0].second)) + .addReg(ARM::SP) + .setMIFlags(MIFlags) + .addImm(-4) + .add(predOps(ARMCC::AL)); } Regs.clear(); @@ -1027,9 +1033,9 @@ }); if (Regs.size() > 1 || LdrOpc == 0) { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(LdmOpc), ARM::SP) - .addReg(ARM::SP)); + MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdmOpc), ARM::SP) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)); for (unsigned i = 0, e = Regs.size(); i < e; ++i) MIB.addReg(Regs[i], getDefRegState(true)); if (DeleteRet && MI != MBB.end()) { @@ -1053,7 +1059,7 @@ MIB.addImm(ARM_AM::getAM2Opc(ARM_AM::add, 4, ARM_AM::no_shift)); } else MIB.addImm(4); - AddDefaultPred(MIB); + MIB.add(predOps(ARMCC::AL)); } Regs.clear(); @@ -1114,9 +1120,10 @@ // sub r4, sp, #numregs * 8 // The immediate is <= 64, so it doesn't need any special encoding. unsigned Opc = isThumb ? 
ARM::t2SUBri : ARM::SUBri; - AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4) - .addReg(ARM::SP) - .addImm(8 * NumAlignedDPRCS2Regs))); + AddDefaultCC(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4) + .addReg(ARM::SP) + .addImm(8 * NumAlignedDPRCS2Regs) + .add(predOps(ARMCC::AL))); unsigned MaxAlign = MF.getFrameInfo().getMaxAlignment(); // We must set parameter MustBeSingleInstruction to true, since @@ -1132,8 +1139,8 @@ // Leave r4 live, it is used below. Opc = isThumb ? ARM::tMOVr : ARM::MOVr; MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(Opc), ARM::SP) - .addReg(ARM::R4); - MIB = AddDefaultPred(MIB); + .addReg(ARM::R4) + .add(predOps(ARMCC::AL)); if (!isThumb) AddDefaultCC(MIB); @@ -1147,11 +1154,12 @@ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, &ARM::QQPRRegClass); MBB.addLiveIn(SupReg); - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Qwb_fixed), - ARM::R4) - .addReg(ARM::R4, RegState::Kill).addImm(16) - .addReg(NextReg) - .addReg(SupReg, RegState::ImplicitKill)); + BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Qwb_fixed), ARM::R4) + .addReg(ARM::R4, RegState::Kill) + .addImm(16) + .addReg(NextReg) + .addReg(SupReg, RegState::ImplicitKill) + .add(predOps(ARMCC::AL)); NextReg += 4; NumAlignedDPRCS2Regs -= 4; } @@ -1165,9 +1173,12 @@ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, &ARM::QQPRRegClass); MBB.addLiveIn(SupReg); - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Q)) - .addReg(ARM::R4).addImm(16).addReg(NextReg) - .addReg(SupReg, RegState::ImplicitKill)); + BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Q)) + .addReg(ARM::R4) + .addImm(16) + .addReg(NextReg) + .addReg(SupReg, RegState::ImplicitKill) + .add(predOps(ARMCC::AL)); NextReg += 4; NumAlignedDPRCS2Regs -= 4; } @@ -1177,8 +1188,11 @@ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, &ARM::QPRRegClass); MBB.addLiveIn(SupReg); - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1q64)) - .addReg(ARM::R4).addImm(16).addReg(SupReg)); + BuildMI(MBB, MI, DL, TII.get(ARM::VST1q64)) + .addReg(ARM::R4) + .addImm(16) + .addReg(SupReg) + .add(predOps(ARMCC::AL)); NextReg += 2; NumAlignedDPRCS2Regs -= 2; } @@ -1187,9 +1201,11 @@ if (NumAlignedDPRCS2Regs) { MBB.addLiveIn(NextReg); // vstr.64 uses addrmode5 which has an offset scale of 4. - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VSTRD)) - .addReg(NextReg) - .addReg(ARM::R4).addImm((NextReg-R4BaseReg)*2)); + BuildMI(MBB, MI, DL, TII.get(ARM::VSTRD)) + .addReg(NextReg) + .addReg(ARM::R4) + .addImm((NextReg - R4BaseReg) * 2) + .add(predOps(ARMCC::AL)); } // The last spill instruction inserted should kill the scratch register r4. @@ -1254,8 +1270,10 @@ assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1"); unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri; - AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4) - .addFrameIndex(D8SpillFI).addImm(0))); + AddDefaultCC(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4) + .addFrameIndex(D8SpillFI) + .addImm(0) + .add(predOps(ARMCC::AL))); // Now restore NumAlignedDPRCS2Regs registers starting from d8. 
unsigned NextReg = ARM::D8; @@ -1264,10 +1282,12 @@ if (NumAlignedDPRCS2Regs >= 6) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, &ARM::QQPRRegClass); - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Qwb_fixed), NextReg) - .addReg(ARM::R4, RegState::Define) - .addReg(ARM::R4, RegState::Kill).addImm(16) - .addReg(SupReg, RegState::ImplicitDefine)); + BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Qwb_fixed), NextReg) + .addReg(ARM::R4, RegState::Define) + .addReg(ARM::R4, RegState::Kill) + .addImm(16) + .addReg(SupReg, RegState::ImplicitDefine) + .add(predOps(ARMCC::AL)); NextReg += 4; NumAlignedDPRCS2Regs -= 4; } @@ -1280,9 +1300,11 @@ if (NumAlignedDPRCS2Regs >= 4) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, &ARM::QQPRRegClass); - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Q), NextReg) - .addReg(ARM::R4).addImm(16) - .addReg(SupReg, RegState::ImplicitDefine)); + BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Q), NextReg) + .addReg(ARM::R4) + .addImm(16) + .addReg(SupReg, RegState::ImplicitDefine) + .add(predOps(ARMCC::AL)); NextReg += 4; NumAlignedDPRCS2Regs -= 4; } @@ -1291,16 +1313,20 @@ if (NumAlignedDPRCS2Regs >= 2) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, &ARM::QPRRegClass); - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1q64), SupReg) - .addReg(ARM::R4).addImm(16)); + BuildMI(MBB, MI, DL, TII.get(ARM::VLD1q64), SupReg) + .addReg(ARM::R4) + .addImm(16) + .add(predOps(ARMCC::AL)); NextReg += 2; NumAlignedDPRCS2Regs -= 2; } // Finally, use a vanilla vldr.64 for the remaining odd register. if (NumAlignedDPRCS2Regs) - AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLDRD), NextReg) - .addReg(ARM::R4).addImm(2*(NextReg-R4BaseReg))); + BuildMI(MBB, MI, DL, TII.get(ARM::VLDRD), NextReg) + .addReg(ARM::R4) + .addImm(2 * (NextReg - R4BaseReg)) + .add(predOps(ARMCC::AL)); // Last store kills r4. 
std::prev(MI)->addRegisterKilled(ARM::R4, TRI); @@ -2081,12 +2107,17 @@ // SR1: Scratch Register #1 // push {SR0, SR1} if (Thumb) { - AddDefaultPred(BuildMI(PrevStackMBB, DL, TII.get(ARM::tPUSH))) - .addReg(ScratchReg0).addReg(ScratchReg1); + BuildMI(PrevStackMBB, DL, TII.get(ARM::tPUSH)) + .add(predOps(ARMCC::AL)) + .addReg(ScratchReg0) + .addReg(ScratchReg1); } else { - AddDefaultPred(BuildMI(PrevStackMBB, DL, TII.get(ARM::STMDB_UPD)) - .addReg(ARM::SP, RegState::Define).addReg(ARM::SP)) - .addReg(ScratchReg0).addReg(ScratchReg1); + BuildMI(PrevStackMBB, DL, TII.get(ARM::STMDB_UPD)) + .addReg(ARM::SP, RegState::Define) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)) + .addReg(ScratchReg0) + .addReg(ScratchReg1); } // Emit the relevant DWARF information about the change in stack pointer as @@ -2106,21 +2137,28 @@ // mov SR1, sp if (Thumb) { - AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::tMOVr), ScratchReg1) - .addReg(ARM::SP)); + BuildMI(McrMBB, DL, TII.get(ARM::tMOVr), ScratchReg1) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)); } else if (CompareStackPointer) { - AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::MOVr), ScratchReg1) - .addReg(ARM::SP)).addReg(0); + BuildMI(McrMBB, DL, TII.get(ARM::MOVr), ScratchReg1) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)) + .addReg(0); } // sub SR1, sp, #StackSize if (!CompareStackPointer && Thumb) { - AddDefaultPred( - AddDefaultCC(BuildMI(McrMBB, DL, TII.get(ARM::tSUBi8), ScratchReg1)) - .addReg(ScratchReg1).addImm(AlignedStackSize)); + AddDefaultCC(BuildMI(McrMBB, DL, TII.get(ARM::tSUBi8), ScratchReg1)) + .addReg(ScratchReg1) + .addImm(AlignedStackSize) + .add(predOps(ARMCC::AL)); } else if (!CompareStackPointer) { - AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::SUBri), ScratchReg1) - .addReg(ARM::SP).addImm(AlignedStackSize)).addReg(0); + BuildMI(McrMBB, DL, TII.get(ARM::SUBri), ScratchReg1) + .addReg(ARM::SP) + .addImm(AlignedStackSize) + .add(predOps(ARMCC::AL)) + .addReg(0); } if (Thumb && ST->isThumb1Only()) { @@ -2131,21 +2169,25 @@ unsigned CPI = MCP->getConstantPoolIndex(NewCPV, 4); // ldr SR0, [pc, offset(STACK_LIMIT)] - AddDefaultPred(BuildMI(GetMBB, DL, TII.get(ARM::tLDRpci), ScratchReg0) - .addConstantPoolIndex(CPI)); + BuildMI(GetMBB, DL, TII.get(ARM::tLDRpci), ScratchReg0) + .addConstantPoolIndex(CPI) + .add(predOps(ARMCC::AL)); // ldr SR0, [SR0] - AddDefaultPred(BuildMI(GetMBB, DL, TII.get(ARM::tLDRi), ScratchReg0) - .addReg(ScratchReg0).addImm(0)); + BuildMI(GetMBB, DL, TII.get(ARM::tLDRi), ScratchReg0) + .addReg(ScratchReg0) + .addImm(0) + .add(predOps(ARMCC::AL)); } else { // Get TLS base address from the coprocessor // mrc p15, #0, SR0, c13, c0, #3 - AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::MRC), ScratchReg0) - .addImm(15) - .addImm(0) - .addImm(13) - .addImm(0) - .addImm(3)); + BuildMI(McrMBB, DL, TII.get(ARM::MRC), ScratchReg0) + .addImm(15) + .addImm(0) + .addImm(13) + .addImm(0) + .addImm(3) + .add(predOps(ARMCC::AL)); // Use the last tls slot on android and a private field of the TCP on linux. assert(ST->isTargetAndroid() || ST->isTargetLinux()); @@ -2153,16 +2195,19 @@ // Get the stack limit from the right offset // ldr SR0, [sr0, #4 * TlsOffset] - AddDefaultPred(BuildMI(GetMBB, DL, TII.get(ARM::LDRi12), ScratchReg0) - .addReg(ScratchReg0).addImm(4 * TlsOffset)); + BuildMI(GetMBB, DL, TII.get(ARM::LDRi12), ScratchReg0) + .addReg(ScratchReg0) + .addImm(4 * TlsOffset) + .add(predOps(ARMCC::AL)); } // Compare stack limit with stack size requested. // cmp SR0, SR1 Opcode = Thumb ? 
ARM::tCMPr : ARM::CMPrr; - AddDefaultPred(BuildMI(GetMBB, DL, TII.get(Opcode)) - .addReg(ScratchReg0) - .addReg(ScratchReg1)); + BuildMI(GetMBB, DL, TII.get(Opcode)) + .addReg(ScratchReg0) + .addReg(ScratchReg1) + .add(predOps(ARMCC::AL)); // This jump is taken if StackLimit < SP - stack required. Opcode = Thumb ? ARM::tBcc : ARM::Bcc; @@ -2178,32 +2223,38 @@ // Pass first argument for the __morestack by Scratch Register #0. // The amount size of stack required if (Thumb) { - AddDefaultPred(AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), - ScratchReg0)).addImm(AlignedStackSize)); + AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg0)) + .addImm(AlignedStackSize) + .add(predOps(ARMCC::AL)); } else { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg0) - .addImm(AlignedStackSize)).addReg(0); + BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg0) + .addImm(AlignedStackSize) + .add(predOps(ARMCC::AL)) + .addReg(0); } // Pass second argument for the __morestack by Scratch Register #1. // The amount size of stack consumed to save function arguments. if (Thumb) { - AddDefaultPred( - AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg1)) - .addImm(alignToARMConstant(ARMFI->getArgumentStackSize()))); + AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg1)) + .addImm(alignToARMConstant(ARMFI->getArgumentStackSize())) + .add(predOps(ARMCC::AL)); } else { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg1) - .addImm(alignToARMConstant(ARMFI->getArgumentStackSize()))) - .addReg(0); + BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg1) + .addImm(alignToARMConstant(ARMFI->getArgumentStackSize())) + .add(predOps(ARMCC::AL)) + .addReg(0); } // push {lr} - Save return address of this function. if (Thumb) { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tPUSH))) + BuildMI(AllocMBB, DL, TII.get(ARM::tPUSH)) + .add(predOps(ARMCC::AL)) .addReg(ARM::LR); } else { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::STMDB_UPD)) - .addReg(ARM::SP, RegState::Define) - .addReg(ARM::SP)) + BuildMI(AllocMBB, DL, TII.get(ARM::STMDB_UPD)) + .addReg(ARM::SP, RegState::Define) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)) .addReg(ARM::LR); } @@ -2220,7 +2271,8 @@ // Call __morestack(). if (Thumb) { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tBL))) + BuildMI(AllocMBB, DL, TII.get(ARM::tBL)) + .add(predOps(ARMCC::AL)) .addExternalSymbol("__morestack"); } else { BuildMI(AllocMBB, DL, TII.get(ARM::BL)) @@ -2230,22 +2282,26 @@ // pop {lr} - Restore return address of this original function. 
if (Thumb) { if (ST->isThumb1Only()) { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tPOP))) - .addReg(ScratchReg0); - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVr), ARM::LR) - .addReg(ScratchReg0)); + BuildMI(AllocMBB, DL, TII.get(ARM::tPOP)) + .add(predOps(ARMCC::AL)) + .addReg(ScratchReg0); + BuildMI(AllocMBB, DL, TII.get(ARM::tMOVr), ARM::LR) + .addReg(ScratchReg0) + .add(predOps(ARMCC::AL)); } else { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::t2LDR_POST)) - .addReg(ARM::LR, RegState::Define) - .addReg(ARM::SP, RegState::Define) - .addReg(ARM::SP) - .addImm(4)); + BuildMI(AllocMBB, DL, TII.get(ARM::t2LDR_POST)) + .addReg(ARM::LR, RegState::Define) + .addReg(ARM::SP, RegState::Define) + .addReg(ARM::SP) + .addImm(4) + .add(predOps(ARMCC::AL)); } } else { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD)) - .addReg(ARM::SP, RegState::Define) - .addReg(ARM::SP)) - .addReg(ARM::LR); + BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD)) + .addReg(ARM::SP, RegState::Define) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)) + .addReg(ARM::LR); } // Restore SR0 and SR1 in case of __morestack() was called. @@ -2253,15 +2309,17 @@ // scratch registers from here. // pop {SR0, SR1} if (Thumb) { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tPOP))) - .addReg(ScratchReg0) - .addReg(ScratchReg1); + BuildMI(AllocMBB, DL, TII.get(ARM::tPOP)) + .add(predOps(ARMCC::AL)) + .addReg(ScratchReg0) + .addReg(ScratchReg1); } else { - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD)) - .addReg(ARM::SP, RegState::Define) - .addReg(ARM::SP)) - .addReg(ScratchReg0) - .addReg(ScratchReg1); + BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD)) + .addReg(ARM::SP, RegState::Define) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)) + .addReg(ScratchReg0) + .addReg(ScratchReg1); } // Update the CFA offset now that we've popped @@ -2271,20 +2329,22 @@ // bx lr - Return from this function. Opcode = Thumb ? ARM::tBX_RET : ARM::BX_RET; - AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(Opcode))); + BuildMI(AllocMBB, DL, TII.get(Opcode)).add(predOps(ARMCC::AL)); // Restore SR0 and SR1 in case of __morestack() was not called. // pop {SR0, SR1} if (Thumb) { - AddDefaultPred(BuildMI(PostStackMBB, DL, TII.get(ARM::tPOP))) - .addReg(ScratchReg0) - .addReg(ScratchReg1); + BuildMI(PostStackMBB, DL, TII.get(ARM::tPOP)) + .add(predOps(ARMCC::AL)) + .addReg(ScratchReg0) + .addReg(ScratchReg1); } else { - AddDefaultPred(BuildMI(PostStackMBB, DL, TII.get(ARM::LDMIA_UPD)) - .addReg(ARM::SP, RegState::Define) - .addReg(ARM::SP)) - .addReg(ScratchReg0) - .addReg(ScratchReg1); + BuildMI(PostStackMBB, DL, TII.get(ARM::LDMIA_UPD)) + .addReg(ARM::SP, RegState::Define) + .addReg(ARM::SP) + .add(predOps(ARMCC::AL)) + .addReg(ScratchReg0) + .addReg(ScratchReg1); } // Update the CFA offset now that we've popped Index: lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- lib/Target/ARM/ARMISelLowering.cpp +++ lib/Target/ARM/ARMISelLowering.cpp @@ -7820,24 +7820,26 @@ // add r5, pc // str r5, [$jbuf, #+4] ; &jbuf[1] unsigned NewVReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) - .addConstantPoolIndex(CPI) - .addMemOperand(CPMMO)); + BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) + .addConstantPoolIndex(CPI) + .addMemOperand(CPMMO) + .add(predOps(ARMCC::AL)); // Set the low bit because of thumb mode. 
unsigned NewVReg2 = MRI->createVirtualRegister(TRC); - AddDefaultCC( - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) + AddDefaultCC(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) .addReg(NewVReg1, RegState::Kill) - .addImm(0x01))); + .addImm(0x01) + .add(predOps(ARMCC::AL))); unsigned NewVReg3 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) .addReg(NewVReg2, RegState::Kill) .addImm(PCLabelId); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) - .addReg(NewVReg3, RegState::Kill) - .addFrameIndex(FI) - .addImm(36) // &jbuf[1] :: pc - .addMemOperand(FIMMOSt)); + BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) + .addReg(NewVReg3, RegState::Kill) + .addFrameIndex(FI) + .addImm(36) // &jbuf[1] :: pc + .addMemOperand(FIMMOSt) + .add(predOps(ARMCC::AL)); } else if (isThumb) { // Incoming value: jbuf // ldr.n r1, LCPI1_4 @@ -7847,51 +7849,58 @@ // add r2, $jbuf, #+4 ; &jbuf[1] // str r1, [r2] unsigned NewVReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) - .addConstantPoolIndex(CPI) - .addMemOperand(CPMMO)); + BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) + .addConstantPoolIndex(CPI) + .addMemOperand(CPMMO) + .add(predOps(ARMCC::AL)); unsigned NewVReg2 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) .addReg(NewVReg1, RegState::Kill) .addImm(PCLabelId); // Set the low bit because of thumb mode. unsigned NewVReg3 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) - .addReg(ARM::CPSR, RegState::Define) - .addImm(1)); + BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) + .addReg(ARM::CPSR, RegState::Define) + .addImm(1) + .add(predOps(ARMCC::AL)); unsigned NewVReg4 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) - .addReg(ARM::CPSR, RegState::Define) - .addReg(NewVReg2, RegState::Kill) - .addReg(NewVReg3, RegState::Kill)); + BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) + .addReg(ARM::CPSR, RegState::Define) + .addReg(NewVReg2, RegState::Kill) + .addReg(NewVReg3, RegState::Kill) + .add(predOps(ARMCC::AL)); unsigned NewVReg5 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) .addFrameIndex(FI) .addImm(36); // &jbuf[1] :: pc - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) - .addReg(NewVReg4, RegState::Kill) - .addReg(NewVReg5, RegState::Kill) - .addImm(0) - .addMemOperand(FIMMOSt)); + BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) + .addReg(NewVReg4, RegState::Kill) + .addReg(NewVReg5, RegState::Kill) + .addImm(0) + .addMemOperand(FIMMOSt) + .add(predOps(ARMCC::AL)); } else { // Incoming value: jbuf // ldr r1, LCPI1_1 // add r1, pc, r1 // str r1, [$jbuf, #+4] ; &jbuf[1] unsigned NewVReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) - .addConstantPoolIndex(CPI) - .addImm(0) - .addMemOperand(CPMMO)); + BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) + .addConstantPoolIndex(CPI) + .addImm(0) + .addMemOperand(CPMMO) + .add(predOps(ARMCC::AL)); unsigned NewVReg2 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) - .addReg(NewVReg1, RegState::Kill) - .addImm(PCLabelId)); - AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) - .addReg(NewVReg2, RegState::Kill) - .addFrameIndex(FI) - .addImm(36) // 
&jbuf[1] :: pc - .addMemOperand(FIMMOSt)); + BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) + .addReg(NewVReg1, RegState::Kill) + .addImm(PCLabelId) + .add(predOps(ARMCC::AL)); + BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) + .addReg(NewVReg2, RegState::Kill) + .addFrameIndex(FI) + .addImm(36) // &jbuf[1] :: pc + .addMemOperand(FIMMOSt) + .add(predOps(ARMCC::AL)); } } @@ -8004,31 +8013,36 @@ unsigned NumLPads = LPadList.size(); if (Subtarget->isThumb2()) { unsigned NewVReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) - .addFrameIndex(FI) - .addImm(4) - .addMemOperand(FIMMOLd)); + BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) + .addFrameIndex(FI) + .addImm(4) + .addMemOperand(FIMMOLd) + .add(predOps(ARMCC::AL)); if (NumLPads < 256) { - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) - .addReg(NewVReg1) - .addImm(LPadList.size())); + BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) + .addReg(NewVReg1) + .addImm(LPadList.size()) + .add(predOps(ARMCC::AL)); } else { unsigned VReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) - .addImm(NumLPads & 0xFFFF)); + BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) + .addImm(NumLPads & 0xFFFF) + .add(predOps(ARMCC::AL)); unsigned VReg2 = VReg1; if ((NumLPads & 0xFFFF0000) != 0) { VReg2 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) - .addReg(VReg1) - .addImm(NumLPads >> 16)); + BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) + .addReg(VReg1) + .addImm(NumLPads >> 16) + .add(predOps(ARMCC::AL)); } - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) - .addReg(NewVReg1) - .addReg(VReg2)); + BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) + .addReg(NewVReg1) + .addReg(VReg2) + .add(predOps(ARMCC::AL)); } BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) @@ -8037,16 +8051,16 @@ .addReg(ARM::CPSR); unsigned NewVReg3 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) - .addJumpTableIndex(MJTI)); + BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3) + .addJumpTableIndex(MJTI) + .add(predOps(ARMCC::AL)); unsigned NewVReg4 = MRI->createVirtualRegister(TRC); - AddDefaultCC( - AddDefaultPred( - BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) - .addReg(NewVReg3, RegState::Kill) - .addReg(NewVReg1) - .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); + AddDefaultCC(BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) + .addReg(NewVReg3, RegState::Kill) + .addReg(NewVReg1) + .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) + .add(predOps(ARMCC::AL))); BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) .addReg(NewVReg4, RegState::Kill) @@ -8054,15 +8068,17 @@ .addJumpTableIndex(MJTI); } else if (Subtarget->isThumb()) { unsigned NewVReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) - .addFrameIndex(FI) - .addImm(1) - .addMemOperand(FIMMOLd)); + BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) + .addFrameIndex(FI) + .addImm(1) + .addMemOperand(FIMMOLd) + .add(predOps(ARMCC::AL)); if (NumLPads < 256) { - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) - .addReg(NewVReg1) - .addImm(NumLPads)); + BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) + .addReg(NewVReg1) + .addImm(NumLPads) + .add(predOps(ARMCC::AL)); } else { MachineConstantPool *ConstantPool = 
MF->getConstantPool(); Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); @@ -8075,12 +8091,14 @@ unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); unsigned VReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) - .addReg(VReg1, RegState::Define) - .addConstantPoolIndex(Idx)); - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) - .addReg(NewVReg1) - .addReg(VReg1)); + BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) + .addReg(VReg1, RegState::Define) + .addConstantPoolIndex(Idx) + .add(predOps(ARMCC::AL)); + BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) + .addReg(NewVReg1) + .addReg(VReg1) + .add(predOps(ARMCC::AL)); } BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) @@ -8089,37 +8107,42 @@ .addReg(ARM::CPSR); unsigned NewVReg2 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) - .addReg(ARM::CPSR, RegState::Define) - .addReg(NewVReg1) - .addImm(2)); + BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) + .addReg(ARM::CPSR, RegState::Define) + .addReg(NewVReg1) + .addImm(2) + .add(predOps(ARMCC::AL)); unsigned NewVReg3 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) - .addJumpTableIndex(MJTI)); + BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) + .addJumpTableIndex(MJTI) + .add(predOps(ARMCC::AL)); unsigned NewVReg4 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) - .addReg(ARM::CPSR, RegState::Define) - .addReg(NewVReg2, RegState::Kill) - .addReg(NewVReg3)); + BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) + .addReg(ARM::CPSR, RegState::Define) + .addReg(NewVReg2, RegState::Kill) + .addReg(NewVReg3) + .add(predOps(ARMCC::AL)); MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); unsigned NewVReg5 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) - .addReg(NewVReg4, RegState::Kill) - .addImm(0) - .addMemOperand(JTMMOLd)); + BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) + .addReg(NewVReg4, RegState::Kill) + .addImm(0) + .addMemOperand(JTMMOLd) + .add(predOps(ARMCC::AL)); unsigned NewVReg6 = NewVReg5; if (IsPositionIndependent) { NewVReg6 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) - .addReg(ARM::CPSR, RegState::Define) - .addReg(NewVReg5, RegState::Kill) - .addReg(NewVReg3)); + BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) + .addReg(ARM::CPSR, RegState::Define) + .addReg(NewVReg5, RegState::Kill) + .addReg(NewVReg3) + .add(predOps(ARMCC::AL)); } BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) @@ -8127,31 +8150,36 @@ .addJumpTableIndex(MJTI); } else { unsigned NewVReg1 = MRI->createVirtualRegister(TRC); - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) - .addFrameIndex(FI) - .addImm(4) - .addMemOperand(FIMMOLd)); + BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) + .addFrameIndex(FI) + .addImm(4) + .addMemOperand(FIMMOLd) + .add(predOps(ARMCC::AL)); if (NumLPads < 256) { - AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) - .addReg(NewVReg1) - .addImm(NumLPads)); + BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) + .addReg(NewVReg1) + .addImm(NumLPads) + .add(predOps(ARMCC::AL)); } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 
       unsigned VReg1 = MRI->createVirtualRegister(TRC);
-      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
-                     .addImm(NumLPads & 0xFFFF));
+      BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
+          .addImm(NumLPads & 0xFFFF)
+          .add(predOps(ARMCC::AL));
       unsigned VReg2 = VReg1;
       if ((NumLPads & 0xFFFF0000) != 0) {
         VReg2 = MRI->createVirtualRegister(TRC);
-        AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
-                       .addReg(VReg1)
-                       .addImm(NumLPads >> 16));
+        BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
+            .addReg(VReg1)
+            .addImm(NumLPads >> 16)
+            .add(predOps(ARMCC::AL));
       }
-      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
-                     .addReg(NewVReg1)
-                     .addReg(VReg2));
+      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
+          .addReg(NewVReg1)
+          .addReg(VReg2)
+          .add(predOps(ARMCC::AL));
     } else {
       MachineConstantPool *ConstantPool = MF->getConstantPool();
       Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
@@ -8164,13 +8192,15 @@
       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
       unsigned VReg1 = MRI->createVirtualRegister(TRC);
-      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
-                     .addReg(VReg1, RegState::Define)
-                     .addConstantPoolIndex(Idx)
-                     .addImm(0));
-      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
-                     .addReg(NewVReg1)
-                     .addReg(VReg1, RegState::Kill));
+      BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
+          .addReg(VReg1, RegState::Define)
+          .addConstantPoolIndex(Idx)
+          .addImm(0)
+          .add(predOps(ARMCC::AL));
+      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
+          .addReg(NewVReg1)
+          .addReg(VReg1, RegState::Kill)
+          .add(predOps(ARMCC::AL));
     }
     BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
@@ -8179,23 +8209,24 @@
       .addReg(ARM::CPSR);
     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
-    AddDefaultCC(
-      AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
+    AddDefaultCC(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
                      .addReg(NewVReg1)
-                     .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
+                     .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
+                     .add(predOps(ARMCC::AL)));
     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
-    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
-                   .addJumpTableIndex(MJTI));
+    BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
+        .addJumpTableIndex(MJTI)
+        .add(predOps(ARMCC::AL));
     MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
         MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
-    AddDefaultPred(
-      BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
-        .addReg(NewVReg3, RegState::Kill)
-        .addReg(NewVReg4)
-        .addImm(0)
-        .addMemOperand(JTMMOLd));
+    BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
+        .addReg(NewVReg3, RegState::Kill)
+        .addReg(NewVReg4)
+        .addImm(0)
+        .addMemOperand(JTMMOLd)
+        .add(predOps(ARMCC::AL));
     if (IsPositionIndependent) {
       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
@@ -8340,26 +8371,35 @@
   unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
   assert(LdOpc != 0 && "Should have a load opcode");
   if (LdSize >= 8) {
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
-                   .addReg(AddrOut, RegState::Define).addReg(AddrIn)
-                   .addImm(0));
+    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+        .addReg(AddrOut, RegState::Define)
+        .addReg(AddrIn)
+        .addImm(0)
+        .add(predOps(ARMCC::AL));
   } else if (IsThumb1) {
     // load + update AddrIn
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
-                   .addReg(AddrIn).addImm(0));
+    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+        .addReg(AddrIn)
+        .addImm(0)
+        .add(predOps(ARMCC::AL));
     MachineInstrBuilder MIB =
         BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
     MIB = AddDefaultT1CC(MIB);
     MIB.addReg(AddrIn).addImm(LdSize);
-    AddDefaultPred(MIB);
+    MIB.add(predOps(ARMCC::AL));
   } else if (IsThumb2) {
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
-                   .addReg(AddrOut, RegState::Define).addReg(AddrIn)
-                   .addImm(LdSize));
+    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+        .addReg(AddrOut, RegState::Define)
+        .addReg(AddrIn)
+        .addImm(LdSize)
+        .add(predOps(ARMCC::AL));
   } else { // arm
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
-                   .addReg(AddrOut, RegState::Define).addReg(AddrIn)
-                   .addReg(0).addImm(LdSize));
+    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
+        .addReg(AddrOut, RegState::Define)
+        .addReg(AddrIn)
+        .addReg(0)
+        .addImm(LdSize)
+        .add(predOps(ARMCC::AL));
   }
 }
@@ -8372,24 +8412,36 @@
   unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
   assert(StOpc != 0 && "Should have a store opcode");
   if (StSize >= 8) {
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
-                   .addReg(AddrIn).addImm(0).addReg(Data));
+    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
+        .addReg(AddrIn)
+        .addImm(0)
+        .addReg(Data)
+        .add(predOps(ARMCC::AL));
   } else if (IsThumb1) {
     // store + update AddrIn
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data)
-                   .addReg(AddrIn).addImm(0));
+    BuildMI(*BB, Pos, dl, TII->get(StOpc))
+        .addReg(Data)
+        .addReg(AddrIn)
+        .addImm(0)
+        .add(predOps(ARMCC::AL));
     MachineInstrBuilder MIB =
         BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
     MIB = AddDefaultT1CC(MIB);
     MIB.addReg(AddrIn).addImm(StSize);
-    AddDefaultPred(MIB);
+    MIB.add(predOps(ARMCC::AL));
   } else if (IsThumb2) {
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
-                   .addReg(Data).addReg(AddrIn).addImm(StSize));
+    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
+        .addReg(Data)
+        .addReg(AddrIn)
+        .addImm(StSize)
+        .add(predOps(ARMCC::AL));
   } else { // arm
-    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
-                   .addReg(Data).addReg(AddrIn).addReg(0)
-                   .addImm(StSize));
+    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
+        .addReg(Data)
+        .addReg(AddrIn)
+        .addReg(0)
+        .addImm(StSize)
+        .add(predOps(ARMCC::AL));
   }
 }
@@ -8520,16 +8572,15 @@
     unsigned Vtmp = varEnd;
     if ((LoopSize & 0xFFFF0000) != 0)
      Vtmp = MRI.createVirtualRegister(TRC);
-    AddDefaultPred(BuildMI(BB, dl,
-                   TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16),
-                   Vtmp).addImm(LoopSize & 0xFFFF));
+    BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
+        .addImm(LoopSize & 0xFFFF)
+        .add(predOps(ARMCC::AL));
     if ((LoopSize & 0xFFFF0000) != 0)
-      AddDefaultPred(BuildMI(BB, dl,
-                     TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16),
-                     varEnd)
-                     .addReg(Vtmp)
-                     .addImm(LoopSize >> 16));
+      BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
+          .addReg(Vtmp)
+          .addImm(LoopSize >> 16)
+          .add(predOps(ARMCC::AL));
   } else {
     MachineConstantPool *ConstantPool = MF->getConstantPool();
     Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
@@ -8542,11 +8593,16 @@
     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
     if (IsThumb)
-      AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg(
-        varEnd, RegState::Define).addConstantPoolIndex(Idx));
+      BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
+          .addReg(varEnd, RegState::Define)
+          .addConstantPoolIndex(Idx)
+          .add(predOps(ARMCC::AL));
     else
-      AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg(
-        varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0));
+      BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
+          .addReg(varEnd, RegState::Define)
+          .addConstantPoolIndex(Idx)
+          .addImm(0)
+          .add(predOps(ARMCC::AL));
   }
   BB->addSuccessor(loopMBB);
@@ -8587,12 +8643,12 @@
         BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
     MIB = AddDefaultT1CC(MIB);
     MIB.addReg(varPhi).addImm(UnitSize);
-    AddDefaultPred(MIB);
+    MIB.add(predOps(ARMCC::AL));
   } else {
     MachineInstrBuilder MIB =
         BuildMI(*BB, BB->end(), dl,
                 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
-    AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
+    AddDefaultCC(MIB.addReg(varPhi).addImm(UnitSize).add(predOps(ARMCC::AL)));
     MIB->getOperand(5).setReg(ARM::CPSR);
     MIB->getOperand(5).setIsDef(true);
   }
@@ -8686,11 +8742,11 @@
     }
   }
-  AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr),
-                                      ARM::SP)
-                              .addReg(ARM::SP, RegState::Kill)
-                              .addReg(ARM::R4, RegState::Kill)
-                              .setMIFlags(MachineInstr::FrameSetup)));
+  AddDefaultCC(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
+                   .addReg(ARM::SP, RegState::Kill)
+                   .addReg(ARM::R4, RegState::Kill)
+                   .setMIFlags(MachineInstr::FrameSetup)
+                   .add(predOps(ARMCC::AL)));
   MI.eraseFromParent();
   return MBB;
@@ -8715,9 +8771,10 @@
   MF->push_back(TrapBB);
   MBB->addSuccessor(TrapBB);
-  AddDefaultPred(BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
-                 .addReg(MI.getOperand(0).getReg())
-                 .addImm(0));
+  BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
+      .addReg(MI.getOperand(0).getReg())
+      .addImm(0)
+      .add(predOps(ARMCC::AL));
   BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
       .addMBB(TrapBB)
       .addImm(ARMCC::EQ)
@@ -8872,18 +8929,20 @@
     unsigned LHS1 = MI.getOperand(1).getReg();
     unsigned LHS2 = MI.getOperand(2).getReg();
     if (RHSisZero) {
-      AddDefaultPred(BuildMI(BB, dl,
-                             TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
-                     .addReg(LHS1).addImm(0));
+      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
+          .addReg(LHS1)
+          .addImm(0)
+          .add(predOps(ARMCC::AL));
       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
         .addReg(LHS2).addImm(0)
         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
     } else {
       unsigned RHS1 = MI.getOperand(3).getReg();
       unsigned RHS2 = MI.getOperand(4).getReg();
-      AddDefaultPred(BuildMI(BB, dl,
-                             TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
-                     .addReg(LHS1).addReg(RHS1));
+      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
+          .addReg(LHS1)
+          .addReg(RHS1)
+          .add(predOps(ARMCC::AL));
       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
         .addReg(LHS2).addReg(RHS2)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
@@ -8897,7 +8956,9 @@
     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
       .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
     if (isThumb2)
-      AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB));
+      BuildMI(BB, dl, TII->get(ARM::t2B))
+          .addMBB(exitMBB)
+          .add(predOps(ARMCC::AL));
     else
       BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
@@ -8960,9 +9021,10 @@
     RSBBB->addSuccessor(SinkBB);
     // insert a cmp at the end of BB
-    AddDefaultPred(BuildMI(BB, dl,
-                           TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
-                   .addReg(ABSSrcReg).addImm(0));
+    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
+        .addReg(ABSSrcReg)
+        .addImm(0)
+        .add(predOps(ARMCC::AL));
     // insert a bcc with opposite CC to ARMCC::MI at the end of BB
     BuildMI(BB, dl,
Index: lib/Target/ARM/ARMInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMInstrInfo.cpp
+++ lib/Target/ARM/ARMInstrInfo.cpp
@@ -129,8 +129,9 @@
   MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
       MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4);
   MIB.addMemOperand(MMO);
-  MIB = BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg);
-  MIB.addReg(Reg, RegState::Kill).addImm(0);
-  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
-  AddDefaultPred(MIB);
+  BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg)
+      .addReg(Reg, RegState::Kill)
+      .addImm(0)
+      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end())
+      .add(predOps(ARMCC::AL));
 }
Index: lib/Target/ARM/ARMInstructionSelector.cpp
===================================================================
--- lib/Target/ARM/ARMInstructionSelector.cpp
+++ lib/Target/ARM/ARMInstructionSelector.cpp
@@ -89,17 +89,17 @@
   switch (I.getOpcode()) {
   case G_ADD:
     I.setDesc(TII.get(ARM::ADDrr));
-    AddDefaultCC(AddDefaultPred(MIB));
+    AddDefaultCC(MIB.add(predOps(ARMCC::AL)));
     break;
   case G_FRAME_INDEX:
     // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
     I.setDesc(TII.get(ARM::ADDri));
-    AddDefaultCC(AddDefaultPred(MIB.addImm(0)));
+    AddDefaultCC(MIB.addImm(0).add(predOps(ARMCC::AL)));
     break;
   case G_LOAD:
     I.setDesc(TII.get(ARM::LDRi12));
-    AddDefaultPred(MIB.addImm(0));
+    MIB.addImm(0).add(predOps(ARMCC::AL));
     break;
   default:
     return false;
Index: lib/Target/ARM/ARMLoadStoreOptimizer.cpp
===================================================================
--- lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1891,8 +1891,9 @@
   for (auto Use : Prev->uses())
     if (Use.isKill()) {
-      AddDefaultPred(BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
-                         .addReg(Use.getReg(), RegState::Kill))
+      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
+          .addReg(Use.getReg(), RegState::Kill)
+          .add(predOps(ARMCC::AL))
           .copyImplicitOps(*MBBI);
       MBB.erase(MBBI);
       MBB.erase(Prev);
Index: lib/Target/ARM/Thumb1FrameLowering.cpp
===================================================================
--- lib/Target/ARM/Thumb1FrameLowering.cpp
+++ lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -238,9 +238,11 @@
   if (HasFP) {
     FramePtrOffsetInBlock +=
        MFI.getObjectOffset(FramePtrSpillFI) + GPRCS1Size + ArgRegsSaveSize;
-    AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
-      .addReg(ARM::SP).addImm(FramePtrOffsetInBlock / 4)
-      .setMIFlags(MachineInstr::FrameSetup));
+    BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
+        .addReg(ARM::SP)
+        .addImm(FramePtrOffsetInBlock / 4)
+        .setMIFlags(MachineInstr::FrameSetup)
+        .add(predOps(ARMCC::AL));
     if(FramePtrOffsetInBlock) {
       CFAOffset += FramePtrOffsetInBlock;
       unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
@@ -336,8 +338,9 @@
   // will be allocated after this, so we can still use the base pointer
   // to reference locals.
   if (RegInfo->hasBasePointer(MF))
-    AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), BasePtr)
-                   .addReg(ARM::SP));
+    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), BasePtr)
+        .addReg(ARM::SP)
+        .add(predOps(ARMCC::AL));
   // If the frame has variable sized objects then the epilogue must restore
   // the sp from fp. We can assume there's an FP here since hasFP already
@@ -408,13 +411,13 @@
              "No scratch register to restore SP from FP!");
       emitThumbRegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes,
                                 TII, *RegInfo);
-      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
-                             ARM::SP)
-        .addReg(ARM::R4));
+      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
+          .addReg(ARM::R4)
+          .add(predOps(ARMCC::AL));
     } else
-      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
-                             ARM::SP)
-        .addReg(FramePtr));
+      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
+          .addReg(FramePtr)
+          .add(predOps(ARMCC::AL));
   } else {
     if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tBX_RET &&
         &MBB.front() != &*MBBI && std::prev(MBBI)->getOpcode() == ARM::tPOP) {
@@ -493,8 +496,8 @@
   if (!DoIt || MBBI->getOpcode() == ARM::tPOP_RET)
     return true;
   MachineInstrBuilder MIB =
-      AddDefaultPred(
-          BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII.get(ARM::tPOP_RET)));
+      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII.get(ARM::tPOP_RET))
+          .add(predOps(ARMCC::AL));
   // Copy implicit ops and popped registers, if any.
   for (auto MO: MBBI->operands())
     if (MO.isReg() && (MO.isImplicit() || MO.isDef()))
@@ -566,17 +569,18 @@
   if (TemporaryReg) {
     assert(!PopReg && "Unnecessary MOV is about to be inserted");
     PopReg = PopFriendly.find_first();
-    AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
-                   .addReg(TemporaryReg, RegState::Define)
-                   .addReg(PopReg, RegState::Kill));
+    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
+        .addReg(TemporaryReg, RegState::Define)
+        .addReg(PopReg, RegState::Kill)
+        .add(predOps(ARMCC::AL));
   }
   if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPOP_RET) {
     // We couldn't use the direct restoration above, so
     // perform the opposite conversion: tPOP_RET to tPOP.
     MachineInstrBuilder MIB =
-        AddDefaultPred(
-            BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII.get(ARM::tPOP)));
+        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII.get(ARM::tPOP))
+            .add(predOps(ARMCC::AL));
     bool Popped = false;
     for (auto MO: MBBI->operands())
       if (MO.isReg() && (MO.isImplicit() || MO.isDef()) &&
@@ -590,23 +594,27 @@
       MBB.erase(MIB.getInstr()); // Erase the old instruction.
     MBB.erase(MBBI);
-    MBBI = AddDefaultPred(BuildMI(MBB, MBB.end(), dl, TII.get(ARM::tBX_RET)));
+    MBBI = BuildMI(MBB, MBB.end(), dl, TII.get(ARM::tBX_RET))
+               .add(predOps(ARMCC::AL));
   }
   assert(PopReg && "Do not know how to get LR");
-  AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)))
+  BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP))
+      .add(predOps(ARMCC::AL))
       .addReg(PopReg, RegState::Define);
   emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, ArgRegsSaveSize);
-  AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
-                 .addReg(ARM::LR, RegState::Define)
-                 .addReg(PopReg, RegState::Kill));
+  BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
+      .addReg(ARM::LR, RegState::Define)
+      .addReg(PopReg, RegState::Kill)
+      .add(predOps(ARMCC::AL));
   if (TemporaryReg)
-    AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
-                   .addReg(PopReg, RegState::Define)
-                   .addReg(TemporaryReg, RegState::Kill));
+    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
+        .addReg(PopReg, RegState::Define)
+        .addReg(TemporaryReg, RegState::Kill)
+        .add(predOps(ARMCC::AL));
   return true;
 }
@@ -667,8 +675,8 @@
   // Push the low registers and lr
   if (!LoRegsToSave.empty()) {
-    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(ARM::tPUSH));
-    AddDefaultPred(MIB);
+    MachineInstrBuilder MIB =
+        BuildMI(MBB, MI, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
     for (unsigned Reg : {ARM::R4, ARM::R5, ARM::R6, ARM::R7, ARM::LR}) {
       if (LoRegsToSave.count(Reg)) {
         bool isKill = !MF.getRegInfo().isLiveIn(Reg);
@@ -708,8 +716,8 @@
       findNextOrderedReg(std::begin(AllCopyRegs), CopyRegs, AllCopyRegsEnd);
   // Create the PUSH, but don't insert it yet (the MOVs need to come first).
-  MachineInstrBuilder PushMIB = BuildMI(MF, DL, TII.get(ARM::tPUSH));
-  AddDefaultPred(PushMIB);
+  MachineInstrBuilder PushMIB =
+      BuildMI(MF, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
   SmallVector<unsigned, 4> RegsToPush;
   while (HiRegToSave != AllHighRegsEnd && CopyReg != AllCopyRegsEnd) {
@@ -719,11 +727,10 @@
       MBB.addLiveIn(*HiRegToSave);
       // Emit a MOV from the high reg to the low reg.
-      MachineInstrBuilder MIB =
-          BuildMI(MBB, MI, DL, TII.get(ARM::tMOVr));
-      MIB.addReg(*CopyReg, RegState::Define);
-      MIB.addReg(*HiRegToSave, getKillRegState(isKill));
-      AddDefaultPred(MIB);
+      BuildMI(MBB, MI, DL, TII.get(ARM::tMOVr))
+          .addReg(*CopyReg, RegState::Define)
+          .addReg(*HiRegToSave, getKillRegState(isKill))
+          .add(predOps(ARMCC::AL));
       // Record the register that must be added to the PUSH.
       RegsToPush.push_back(*CopyReg);
@@ -817,19 +824,18 @@
       findNextOrderedReg(std::begin(AllCopyRegs), CopyRegs, AllCopyRegsEnd);
   // Create the POP instruction.
-  MachineInstrBuilder PopMIB = BuildMI(MBB, MI, DL, TII.get(ARM::tPOP));
-  AddDefaultPred(PopMIB);
+  MachineInstrBuilder PopMIB =
+      BuildMI(MBB, MI, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
   while (HiRegToRestore != AllHighRegsEnd && CopyReg != AllCopyRegsEnd) {
     // Add the low register to the POP.
     PopMIB.addReg(*CopyReg, RegState::Define);
     // Create the MOV from low to high register.
-    MachineInstrBuilder MIB =
-        BuildMI(MBB, MI, DL, TII.get(ARM::tMOVr));
-    MIB.addReg(*HiRegToRestore, RegState::Define);
-    MIB.addReg(*CopyReg, RegState::Kill);
-    AddDefaultPred(MIB);
+    BuildMI(MBB, MI, DL, TII.get(ARM::tMOVr))
+        .addReg(*HiRegToRestore, RegState::Define)
+        .addReg(*CopyReg, RegState::Kill)
+        .add(predOps(ARMCC::AL));
     CopyReg = findNextOrderedReg(++CopyReg, CopyRegs, AllCopyRegsEnd);
     HiRegToRestore =
@@ -837,11 +843,8 @@
     }
   }
-
-
-
-  MachineInstrBuilder MIB = BuildMI(MF, DL, TII.get(ARM::tPOP));
-  AddDefaultPred(MIB);
+  MachineInstrBuilder MIB =
+      BuildMI(MF, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
   bool NeedsPop = false;
   for (unsigned i = CSI.size(); i != 0; --i) {
Index: lib/Target/ARM/Thumb1InstrInfo.cpp
===================================================================
--- lib/Target/ARM/Thumb1InstrInfo.cpp
+++ lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -50,8 +50,9 @@
   if (st.hasV6Ops() || ARM::hGPRRegClass.contains(SrcReg)
       || !ARM::tGPRRegClass.contains(DestReg))
-    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
-                   .addReg(SrcReg, getKillRegState(KillSrc)));
+    BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
+        .addReg(SrcReg, getKillRegState(KillSrc))
+        .add(predOps(ARMCC::AL));
   else {
     // FIXME: The performance consequences of this are going to be atrocious.
     // Some things to try that should be better:
@@ -60,10 +61,12 @@
     // See: http://lists.llvm.org/pipermail/llvm-dev/2014-August/075998.html
     // 'MOV lo, lo' is unpredictable on < v6, so use the stack to do it
-    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tPUSH)))
-      .addReg(SrcReg, getKillRegState(KillSrc));
-    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tPOP)))
-      .addReg(DestReg, getDefRegState(true));
+    BuildMI(MBB, I, DL, get(ARM::tPUSH))
+        .add(predOps(ARMCC::AL))
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    BuildMI(MBB, I, DL, get(ARM::tPOP))
+        .add(predOps(ARMCC::AL))
+        .addReg(DestReg, getDefRegState(true));
   }
 }
@@ -87,9 +90,12 @@
     MachineMemOperand *MMO = MF.getMachineMemOperand(
         MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
-    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tSTRspi))
-                   .addReg(SrcReg, getKillRegState(isKill))
-                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+    BuildMI(MBB, I, DL, get(ARM::tSTRspi))
+        .addReg(SrcReg, getKillRegState(isKill))
+        .addFrameIndex(FI)
+        .addImm(0)
+        .addMemOperand(MMO)
+        .add(predOps(ARMCC::AL));
   }
 }
@@ -113,8 +119,11 @@
     MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
-    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tLDRspi), DestReg)
-                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+    BuildMI(MBB, I, DL, get(ARM::tLDRspi), DestReg)
+        .addFrameIndex(FI)
+        .addImm(0)
+        .addMemOperand(MMO)
+        .add(predOps(ARMCC::AL));
   }
 }
Index: lib/Target/ARM/Thumb2InstrInfo.cpp
===================================================================
--- lib/Target/ARM/Thumb2InstrInfo.cpp
+++ lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -117,8 +117,9 @@
   if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
     return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);
-  AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
-                 .addReg(SrcReg, getKillRegState(KillSrc)));
+  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
+      .addReg(SrcReg, getKillRegState(KillSrc))
+      .add(predOps(ARMCC::AL));
 }
 void Thumb2InstrInfo::
@@ -138,9 +139,12 @@
   if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
       RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
       RC == &ARM::GPRnopcRegClass) {
-    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2STRi12))
-                   .addReg(SrcReg, getKillRegState(isKill))
-                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
+        .addReg(SrcReg, getKillRegState(isKill))
+        .addFrameIndex(FI)
+        .addImm(0)
+        .addMemOperand(MMO)
+        .add(predOps(ARMCC::AL));
     return;
   }
@@ -156,8 +160,7 @@
     MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
     AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
     AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
-    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
-    AddDefaultPred(MIB);
+    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
     return;
   }
@@ -180,8 +183,11 @@
   if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
       RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
       RC == &ARM::GPRnopcRegClass) {
-    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
-                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
+        .addFrameIndex(FI)
+        .addImm(0)
+        .addMemOperand(MMO)
+        .add(predOps(ARMCC::AL));
     return;
   }
@@ -198,8 +204,7 @@
     MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
     AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
     AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
-    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
-    AddDefaultPred(MIB);
+    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
     if (TargetRegisterInfo::isPhysicalRegister(DestReg))
       MIB.addReg(DestReg, RegState::ImplicitDefine);
@@ -284,8 +289,10 @@
     unsigned Opc = 0;
     if (DestReg == ARM::SP && BaseReg != ARM::SP) {
       // mov sp, rn. Note t2MOVr cannot be used.
-      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),DestReg)
-        .addReg(BaseReg).setMIFlags(MIFlags));
+      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
+          .addReg(BaseReg)
+          .setMIFlags(MIFlags)
+          .add(predOps(ARMCC::AL));
       BaseReg = ARM::SP;
       continue;
     }
@@ -296,8 +303,11 @@
     if (DestReg == ARM::SP && (ThisVal < ((1 << 7)-1) * 4)) {
       assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
       Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
-      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
-        .addReg(BaseReg).addImm(ThisVal/4).setMIFlags(MIFlags));
+      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
+          .addReg(BaseReg)
+          .addImm(ThisVal / 4)
+          .setMIFlags(MIFlags)
+          .add(predOps(ARMCC::AL));
       NumBytes = 0;
       continue;
     }
@@ -334,10 +344,11 @@
     }
     // Build the new ADD / SUB.
-    MachineInstrBuilder MIB =
-      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
-                     .addReg(BaseReg, RegState::Kill)
-                     .addImm(ThisVal)).setMIFlags(MIFlags);
+    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
+                                  .addReg(BaseReg, RegState::Kill)
+                                  .addImm(ThisVal)
+                                  .add(predOps(ARMCC::AL))
+                                  .setMIFlags(MIFlags);
     if (HasCCOut)
       AddDefaultCC(MIB);
@@ -474,7 +485,7 @@
       do MI.RemoveOperand(FrameRegIdx+1);
       while (MI.getNumOperands() > FrameRegIdx+1);
       MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
-      AddDefaultPred(MIB);
+      MIB.add(predOps(ARMCC::AL));
       return true;
     }
Index: lib/Target/ARM/Thumb2SizeReduction.cpp
===================================================================
--- lib/Target/ARM/Thumb2SizeReduction.cpp
+++ lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -621,12 +621,13 @@
       MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
     return false;
-  MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
-                                    TII->get(ARM::tADDrSPi))
-    .addOperand(MI->getOperand(0))
-    .addOperand(MI->getOperand(1))
-    .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
-  AddDefaultPred(MIB);
+  MachineInstrBuilder MIB =
+      BuildMI(MBB, MI, MI->getDebugLoc(),
+              TII->get(ARM::tADDrSPi))
+          .addOperand(MI->getOperand(0))
+          .addOperand(MI->getOperand(1))
+          .addImm(Imm / 4) // The tADDrSPi has an implied scale by four.
+          .add(predOps(ARMCC::AL));
   // Transfer MI flags.
   MIB.setMIFlags(MI->getFlags());
@@ -912,7 +913,7 @@
     MIB.addOperand(MO);
   }
   if (!MCID.isPredicable() && NewMCID.isPredicable())
-    AddDefaultPred(MIB);
+    MIB.add(predOps(ARMCC::AL));
   // Transfer MI flags.
   MIB.setMIFlags(MI->getFlags());
Index: lib/Target/ARM/ThumbRegisterInfo.cpp
===================================================================
--- lib/Target/ARM/ThumbRegisterInfo.cpp
+++ lib/Target/ARM/ThumbRegisterInfo.cpp
@@ -172,7 +172,7 @@
     MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
   else
     MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
-  AddDefaultPred(MIB);
+  MIB.add(predOps(ARMCC::AL));
 }
 /// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
@@ -312,7 +312,7 @@
     if (CopyOpc != ARM::tMOVr) {
       MIB.addImm(CopyImm);
     }
-    AddDefaultPred(MIB.setMIFlags(MIFlags));
+    MIB.setMIFlags(MIFlags).add(predOps(ARMCC::AL));
     BaseReg = DestReg;
   }
@@ -325,9 +325,10 @@
     MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
     if (ExtraNeedsCC)
      MIB = AddDefaultT1CC(MIB);
-    MIB.addReg(BaseReg).addImm(ExtraImm);
-    MIB = AddDefaultPred(MIB);
-    MIB.setMIFlags(MIFlags);
+    MIB.addReg(BaseReg)
+        .addImm(ExtraImm)
+        .add(predOps(ARMCC::AL))
+        .setMIFlags(MIFlags);
   }
 }
@@ -460,9 +461,10 @@
   // a call clobbered register that we know won't be used in Thumb1 mode.
   const TargetInstrInfo &TII = *STI.getInstrInfo();
   DebugLoc DL;
-  AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
-    .addReg(ARM::R12, RegState::Define)
-    .addReg(Reg, RegState::Kill));
+  BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
+      .addReg(ARM::R12, RegState::Define)
+      .addReg(Reg, RegState::Kill)
+      .add(predOps(ARMCC::AL));
   // The UseMI is where we would like to restore the register. If there's
   // interference with R12 before then, however, we'll need to restore it
@@ -490,8 +492,10 @@
     }
   }
   // Restore the register from R12
-  AddDefaultPred(BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr)).
-    addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill));
+  BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr))
+      .addReg(Reg, RegState::Define)
+      .addReg(ARM::R12, RegState::Kill)
+      .add(predOps(ARMCC::AL));
   return true;
 }
@@ -621,5 +625,5 @@
   // Add predicate back if it's needed.
   if (MI.isPredicable())
-    AddDefaultPred(MIB);
+    MIB.add(predOps(ARMCC::AL));
 }
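
For reference, the mechanical rewrite applied at every call site in this patch looks roughly like the sketch below. This is illustrative only and not part of the diff; MBB, I, DL, TII, DestReg and SrcReg stand in for whatever block, insertion point and registers the surrounding function already has in scope. The old helper wrapped a finished builder to append the default "always execute" predicate operands, while the new form appends the same operands explicitly at the end of the builder chain via predOps(ARMCC::AL), so the operands appear in source order at the call site.

  // Old style: wrap the builder to append the default predicate operands.
  AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr), DestReg)
                     .addReg(SrcReg));

  // New style: spell the predicate operands explicitly at the end of the chain.
  BuildMI(MBB, I, DL, TII.get(ARM::tMOVr), DestReg)
      .addReg(SrcReg)
      .add(predOps(ARMCC::AL));

The resulting operand order is unchanged; only the way the predicate operands are written at the call site differs, which is why each hunk simply drops the wrapper and tacks .add(predOps(ARMCC::AL)) onto the builder.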