Index: include/llvm/CodeGen/MachineInstrBuilder.h =================================================================== --- include/llvm/CodeGen/MachineInstrBuilder.h +++ include/llvm/CodeGen/MachineInstrBuilder.h @@ -187,7 +187,7 @@ return *this; } - const MachineInstrBuilder &addOperand(const MachineOperand &MO) const { + const MachineInstrBuilder &add(const MachineOperand &MO) const { MI->addOperand(*MF, MO); return *this; } Index: lib/CodeGen/ImplicitNullChecks.cpp =================================================================== --- lib/CodeGen/ImplicitNullChecks.cpp +++ lib/CodeGen/ImplicitNullChecks.cpp @@ -522,7 +522,7 @@ .addImm(LoadMI->getOpcode()); for (auto &MO : LoadMI->uses()) - MIB.addOperand(MO); + MIB.add(MO); MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end()); Index: lib/CodeGen/LiveDebugVariables.cpp =================================================================== --- lib/CodeGen/LiveDebugVariables.cpp +++ lib/CodeGen/LiveDebugVariables.cpp @@ -944,7 +944,7 @@ IsIndirect, Loc.getReg(), offset, Variable, Expression); else BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE)) - .addOperand(Loc) + .add(Loc) .addImm(offset) .addMetadata(Variable) .addMetadata(Expression); Index: lib/CodeGen/PatchableFunction.cpp =================================================================== --- lib/CodeGen/PatchableFunction.cpp +++ lib/CodeGen/PatchableFunction.cpp @@ -75,7 +75,7 @@ .addImm(FirstActualI->getOpcode()); for (auto &MO : FirstActualI->operands()) - MIB.addOperand(MO); + MIB.add(MO); FirstActualI->eraseFromParent(); MF.ensureAlignment(4); Index: lib/CodeGen/SelectionDAG/FastISel.cpp =================================================================== --- lib/CodeGen/SelectionDAG/FastISel.cpp +++ lib/CodeGen/SelectionDAG/FastISel.cpp @@ -646,7 +646,7 @@ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::STACKMAP)); for (auto const &MO : Ops) - MIB.addOperand(MO); + 
MIB.add(MO); // Issue CALLSEQ_END unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); @@ -826,7 +826,7 @@ TII.get(TargetOpcode::PATCHPOINT)); for (auto &MO : Ops) - MIB.addOperand(MO); + MIB.add(MO); MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI); @@ -1149,7 +1149,7 @@ } else BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::DBG_VALUE)) - .addOperand(*Op) + .add(*Op) .addImm(0) .addMetadata(DI->getVariable()) .addMetadata(DI->getExpression()); Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4752,7 +4752,7 @@ else FuncInfo.ArgDbgValues.push_back( BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE)) - .addOperand(*Op) + .add(*Op) .addImm(Offset) .addMetadata(Variable) .addMetadata(Expr)); Index: lib/CodeGen/TargetInstrInfo.cpp =================================================================== --- lib/CodeGen/TargetInstrInfo.cpp +++ lib/CodeGen/TargetInstrInfo.cpp @@ -470,7 +470,7 @@ // No need to fold return, the meta data, and function arguments for (unsigned i = 0; i < StartIdx; ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) { MachineOperand &MO = MI.getOperand(i); @@ -490,7 +490,7 @@ MIB.addImm(SpillOffset); } else - MIB.addOperand(MO); + MIB.add(MO); } return NewMI; } Index: lib/CodeGen/TargetLoweringBase.cpp =================================================================== --- lib/CodeGen/TargetLoweringBase.cpp +++ lib/CodeGen/TargetLoweringBase.cpp @@ -1227,7 +1227,7 @@ // Copy operands before the frame-index. for (unsigned i = 0; i < OperIdx; ++i) - MIB.addOperand(MI->getOperand(i)); + MIB.add(MI->getOperand(i)); // Add frame index operands recognized by stackmaps.cpp if (MFI.isStatepointSpillSlotObjectIndex(FI)) { // indirect-mem-ref tag, size, #FI, offset. 
@@ -1237,18 +1237,18 @@ assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity"); MIB.addImm(StackMaps::IndirectMemRefOp); MIB.addImm(MFI.getObjectSize(FI)); - MIB.addOperand(MI->getOperand(OperIdx)); + MIB.add(MI->getOperand(OperIdx)); MIB.addImm(0); } else { // direct-mem-ref tag, #FI, offset. // Used by patchpoint, and direct alloca arguments to statepoints MIB.addImm(StackMaps::DirectMemRefOp); - MIB.addOperand(MI->getOperand(OperIdx)); + MIB.add(MI->getOperand(OperIdx)); MIB.addImm(0); } // Copy the operands after the frame index. for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i) - MIB.addOperand(MI->getOperand(i)); + MIB.add(MI->getOperand(i)); // Inherit previous memory operands. MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); Index: lib/CodeGen/TwoAddressInstructionPass.cpp =================================================================== --- lib/CodeGen/TwoAddressInstructionPass.cpp +++ lib/CodeGen/TwoAddressInstructionPass.cpp @@ -1785,7 +1785,7 @@ MachineInstr *CopyMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(TargetOpcode::COPY)) .addReg(DstReg, RegState::Define, SubIdx) - .addOperand(UseMO); + .add(UseMO); // The first def needs an flag because there is no live register // before it. Index: lib/CodeGen/XRayInstrumentation.cpp =================================================================== --- lib/CodeGen/XRayInstrumentation.cpp +++ lib/CodeGen/XRayInstrumentation.cpp @@ -81,7 +81,7 @@ auto MIB = BuildMI(MBB, T, T.getDebugLoc(), TII->get(Opc)) .addImm(T.getOpcode()); for (auto &MO : T.operands()) - MIB.addOperand(MO); + MIB.add(MO); Terminators.push_back(&T); } } Index: lib/Target/AArch64/AArch64CallLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64CallLowering.cpp +++ lib/Target/AArch64/AArch64CallLowering.cpp @@ -264,7 +264,7 @@ // uses of arg registers. auto MIB = MIRBuilder.buildInstrNoInsert(Callee.isReg() ? 
AArch64::BLR : AArch64::BL); - MIB.addOperand(Callee); + MIB.add(Callee); // Tell the call which registers are clobbered. auto TRI = MF.getSubtarget().getRegisterInfo(); Index: lib/Target/AArch64/AArch64ConditionOptimizer.cpp =================================================================== --- lib/Target/AArch64/AArch64ConditionOptimizer.cpp +++ lib/Target/AArch64/AArch64ConditionOptimizer.cpp @@ -265,10 +265,10 @@ // Change immediate in comparison instruction (ADDS or SUBS). BuildMI(*MBB, CmpMI, CmpMI->getDebugLoc(), TII->get(Opc)) - .addOperand(CmpMI->getOperand(0)) - .addOperand(CmpMI->getOperand(1)) + .add(CmpMI->getOperand(0)) + .add(CmpMI->getOperand(1)) .addImm(Imm) - .addOperand(CmpMI->getOperand(3)); + .add(CmpMI->getOperand(3)); CmpMI->eraseFromParent(); // The fact that this comparison was picked ensures that it's related to the @@ -278,7 +278,7 @@ // Change condition in branch instruction. BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc)) .addImm(Cmp) - .addOperand(BrMI.getOperand(1)); + .add(BrMI.getOperand(1)); BrMI.eraseFromParent(); MBB->updateTerminator(); Index: lib/Target/AArch64/AArch64ConditionalCompares.cpp =================================================================== --- lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -594,7 +594,7 @@ // Insert a SUBS Rn, #0 instruction instead of the cbz / cbnz. BuildMI(*Head, Head->end(), TermDL, MCID) .addReg(DestReg, RegState::Define | RegState::Dead) - .addOperand(HeadCond[2]) + .add(HeadCond[2]) .addImm(0) .addImm(0); // SUBS uses the GPR*sp register classes. 
@@ -650,13 +650,12 @@ if (CmpMI->getOperand(FirstOp + 1).isReg()) MRI->constrainRegClass(CmpMI->getOperand(FirstOp + 1).getReg(), TII->getRegClass(MCID, 1, TRI, *MF)); - MachineInstrBuilder MIB = - BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID) - .addOperand(CmpMI->getOperand(FirstOp)); // Register Rn + MachineInstrBuilder MIB = BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID) + .add(CmpMI->getOperand(FirstOp)); // Register Rn if (isZBranch) MIB.addImm(0); // cbz/cbnz Rn -> ccmp Rn, #0 else - MIB.addOperand(CmpMI->getOperand(FirstOp + 1)); // Register Rm / Immediate + MIB.add(CmpMI->getOperand(FirstOp + 1)); // Register Rm / Immediate MIB.addImm(NZCV).addImm(HeadCmpBBCC); // If CmpMI was a terminator, we need a new conditional branch to replace it. @@ -666,7 +665,7 @@ CmpMI->getOpcode() == AArch64::CBNZX; BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), TII->get(AArch64::Bcc)) .addImm(isNZ ? AArch64CC::NE : AArch64CC::EQ) - .addOperand(CmpMI->getOperand(1)); // Branch target. + .add(CmpMI->getOperand(1)); // Branch target. } CmpMI->eraseFromParent(); Head->updateTerminator(); Index: lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp =================================================================== --- lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -70,9 +70,9 @@ const MachineOperand &MO = OldMI.getOperand(i); assert(MO.isReg() && MO.getReg()); if (MO.isUse()) - UseMI.addOperand(MO); + UseMI.add(MO); else - DefMI.addOperand(MO); + DefMI.add(MO); } } @@ -112,7 +112,7 @@ // Create the ORR-immediate instruction. MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(AArch64::XZR) .addImm(Encoding); @@ -179,7 +179,7 @@ // Create the ORR-immediate instruction. 
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(AArch64::XZR) .addImm(Encoding); @@ -362,7 +362,7 @@ AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(AArch64::XZR) .addImm(Encoding); @@ -425,7 +425,7 @@ unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR) .addImm(Encoding); transferImpOps(MI, MIB, MIB); @@ -627,7 +627,7 @@ .addReg(Addr.getReg()); BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg) .addReg(Dest.getReg(), getKillRegState(Dest.isDead())) - .addOperand(Desired) + .add(Desired) .addImm(ExtendImm); BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc)) .addImm(AArch64CC::NE) @@ -643,9 +643,7 @@ StoreBB->addLiveIn(New.getReg()); addPostLoopLiveIns(StoreBB, LiveRegs); - BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg) - .addOperand(New) - .addOperand(Addr); + BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg).add(New).add(Addr); BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW)) .addReg(StatusReg, RegState::Kill) .addMBB(LoadCmpBB); @@ -710,7 +708,7 @@ .addReg(Addr.getReg()); BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR) .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead())) - .addOperand(DesiredLo) + .add(DesiredLo) .addImm(0); BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg) .addUse(AArch64::WZR) @@ -718,7 +716,7 @@ .addImm(AArch64CC::EQ); BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR) .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead())) - .addOperand(DesiredHi) + .add(DesiredHi) .addImm(0); BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), 
StatusReg) .addUse(StatusReg, RegState::Kill) @@ -738,9 +736,9 @@ StoreBB->addLiveIn(NewHi.getReg()); addPostLoopLiveIns(StoreBB, LiveRegs); BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg) - .addOperand(NewLo) - .addOperand(NewHi) - .addOperand(Addr); + .add(NewLo) + .add(NewHi) + .add(Addr); BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW)) .addReg(StatusReg, RegState::Kill) .addMBB(LoadCmpBB); @@ -825,8 +823,8 @@ MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode), MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(1)) - .addOperand(MI.getOperand(2)) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)) .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); transferImpOps(MI, MIB1, MIB1); MI.eraseFromParent(); @@ -842,7 +840,7 @@ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg); MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(DstReg); if (MO1.isGlobal()) { @@ -878,13 +876,13 @@ unsigned DstReg = MI.getOperand(0).getReg(); MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg) - .addOperand(MI.getOperand(1)); + .add(MI.getOperand(1)); MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(DstReg) - .addOperand(MI.getOperand(2)) + .add(MI.getOperand(2)) .addImm(0); transferImpOps(MI, MIB1, MIB2); Index: lib/Target/AArch64/AArch64FrameLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64FrameLowering.cpp +++ lib/Target/AArch64/AArch64FrameLowering.cpp @@ -362,7 +362,7 @@ unsigned OpndIdx = 0; for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd; ++OpndIdx) - MIB.addOperand(MBBI->getOperand(OpndIdx)); + MIB.add(MBBI->getOperand(OpndIdx)); assert(MBBI->getOperand(OpndIdx).getImm() == 0 && 
"Unexpected immediate offset in first/last callee-save save/restore " Index: lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64InstrInfo.cpp +++ lib/Target/AArch64/AArch64InstrInfo.cpp @@ -369,7 +369,7 @@ // Folded compare-and-branch // Note that we use addOperand instead of addReg to keep the flags. const MachineInstrBuilder MIB = - BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]); + BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]); if (Cond.size() > 3) MIB.addImm(Cond[3].getImm()); MIB.addMBB(TBB); @@ -3793,7 +3793,7 @@ MachineInstrBuilder MIB1 = BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR) .addReg(ZeroReg) - .addOperand(Root.getOperand(2)); + .add(Root.getOperand(2)); InsInstrs.push_back(MIB1); InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC); Index: lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp =================================================================== --- lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp +++ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp @@ -595,7 +595,7 @@ MachineInstrBuilder MIB; MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc))) .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR) - .addOperand(BaseRegOp) + .add(BaseRegOp) .addImm(OffsetImm) .setMemRefs(I->mergeMemRefsWith(*MergeMI)); (void)MIB; @@ -688,9 +688,9 @@ DebugLoc DL = I->getDebugLoc(); MachineBasicBlock *MBB = I->getParent(); MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc))) - .addOperand(getLdStRegOp(*RtMI)) - .addOperand(getLdStRegOp(*Rt2MI)) - .addOperand(BaseRegOp) + .add(getLdStRegOp(*RtMI)) + .add(getLdStRegOp(*Rt2MI)) + .add(BaseRegOp) .addImm(OffsetImm) .setMemRefs(I->mergeMemRefsWith(*Paired)); @@ -1210,19 +1210,19 @@ if (!isPairedLdSt(*I)) { // Non-paired instruction. 
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc)) - .addOperand(getLdStRegOp(*Update)) - .addOperand(getLdStRegOp(*I)) - .addOperand(getLdStBaseOp(*I)) + .add(getLdStRegOp(*Update)) + .add(getLdStRegOp(*I)) + .add(getLdStBaseOp(*I)) .addImm(Value) .setMemRefs(I->memoperands_begin(), I->memoperands_end()); } else { // Paired instruction. int Scale = getMemScale(*I); MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc)) - .addOperand(getLdStRegOp(*Update)) - .addOperand(getLdStRegOp(*I, 0)) - .addOperand(getLdStRegOp(*I, 1)) - .addOperand(getLdStBaseOp(*I)) + .add(getLdStRegOp(*Update)) + .add(getLdStRegOp(*I, 0)) + .add(getLdStRegOp(*I, 1)) + .add(getLdStBaseOp(*I)) .addImm(Value / Scale) .setMemRefs(I->memoperands_begin(), I->memoperands_end()); } Index: lib/Target/AMDGPU/R600ISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/R600ISelLowering.cpp +++ lib/Target/AMDGPU/R600ISelLowering.cpp @@ -266,7 +266,7 @@ NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::getLDSNoRetOp(MI.getOpcode()))); for (unsigned i = 1, e = MI.getNumOperands(); i < e; ++i) { - NewMI.addOperand(MI.getOperand(i)); + NewMI.add(MI.getOperand(i)); } } else { return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); @@ -339,34 +339,34 @@ case AMDGPU::RAT_WRITE_CACHELESS_64_eg: case AMDGPU::RAT_WRITE_CACHELESS_128_eg: BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode())) - .addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(0)) + .add(MI.getOperand(1)) .addImm(isEOP(I)); // Set End of program bit break; case AMDGPU::RAT_STORE_TYPED_eg: BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode())) - .addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) - .addOperand(MI.getOperand(2)) + .add(MI.getOperand(0)) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)) .addImm(isEOP(I)); // Set End of program bit break; case AMDGPU::BRANCH: BuildMI(*BB, 
I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP)) - .addOperand(MI.getOperand(0)); + .add(MI.getOperand(0)); break; case AMDGPU::BRANCH_COND_f32: { MachineInstr *NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X), AMDGPU::PREDICATE_BIT) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(AMDGPU::PRED_SETNE) .addImm(0); // Flags TII->addFlag(*NewMI, 0, MO_FLAG_PUSH); BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill); break; } @@ -375,12 +375,12 @@ MachineInstr *NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X), AMDGPU::PREDICATE_BIT) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(AMDGPU::PRED_SETNE_INT) .addImm(0); // Flags TII->addFlag(*NewMI, 0, MO_FLAG_PUSH); BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill); break; } @@ -408,13 +408,13 @@ return BB; unsigned CfInst = (MI.getOpcode() == AMDGPU::EG_ExportSwz) ? 
84 : 40; BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode())) - .addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) - .addOperand(MI.getOperand(2)) - .addOperand(MI.getOperand(3)) - .addOperand(MI.getOperand(4)) - .addOperand(MI.getOperand(5)) - .addOperand(MI.getOperand(6)) + .add(MI.getOperand(0)) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)) + .add(MI.getOperand(3)) + .add(MI.getOperand(4)) + .add(MI.getOperand(5)) + .add(MI.getOperand(6)) .addImm(CfInst) .addImm(EOP); break; Index: lib/Target/AMDGPU/SIFixSGPRCopies.cpp =================================================================== --- lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -234,8 +234,9 @@ unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC); - BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), TmpReg) - .addOperand(MI.getOperand(I)); + BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), + TmpReg) + .add(MI.getOperand(I)); MI.getOperand(I).setReg(TmpReg); } Index: lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIISelLowering.cpp +++ lib/Target/AMDGPU/SIISelLowering.cpp @@ -1461,16 +1461,16 @@ VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; if (Offset == 0) { MachineInstr *SetOn = - BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) - .addOperand(*Idx) - .addImm(IdxMode); + BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) + .add(*Idx) + .addImm(IdxMode); SetOn->getOperand(3).setIsUndef(); } else { unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) - .addOperand(*Idx) - .addImm(Offset); + .add(*Idx) + .addImm(Offset); MachineInstr *SetOn = BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) .addReg(Tmp, RegState::Kill) @@ -1483,12 +1483,11 @@ } if (Offset == 0) { - BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), 
AMDGPU::M0) - .addOperand(*Idx); + BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx); } else { BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) - .addOperand(*Idx) - .addImm(Offset); + .add(*Idx) + .addImm(Offset); } return true; @@ -1625,9 +1624,9 @@ assert(Offset == 0); BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) - .addOperand(*SrcVec) - .addOperand(*Val) - .addImm(SubReg); + .add(*SrcVec) + .add(*Val) + .addImm(SubReg); MI.eraseFromParent(); return &MBB; @@ -1639,11 +1638,11 @@ if (UseGPRIdxMode) { BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) - .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst - .addOperand(*Val) - .addReg(Dst, RegState::ImplicitDefine) - .addReg(SrcVec->getReg(), RegState::Implicit) - .addReg(AMDGPU::M0, RegState::Implicit); + .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst + .add(*Val) + .addReg(Dst, RegState::ImplicitDefine) + .addReg(SrcVec->getReg(), RegState::Implicit) + .addReg(AMDGPU::M0, RegState::Implicit); BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); } else { @@ -1652,7 +1651,7 @@ BuildMI(MBB, I, DL, MovRelDesc) .addReg(Dst, RegState::Define) .addReg(SrcVec->getReg()) - .addOperand(*Val) + .add(*Val) .addImm(SubReg - AMDGPU::sub0); } @@ -1685,18 +1684,18 @@ if (UseGPRIdxMode) { BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) - .addReg(PhiReg, RegState::Undef, SubReg) // vdst - .addOperand(*Val) // src0 - .addReg(Dst, RegState::ImplicitDefine) - .addReg(PhiReg, RegState::Implicit) - .addReg(AMDGPU::M0, RegState::Implicit); + .addReg(PhiReg, RegState::Undef, SubReg) // vdst + .add(*Val) // src0 + .addReg(Dst, RegState::ImplicitDefine) + .addReg(PhiReg, RegState::Implicit) + .addReg(AMDGPU::M0, RegState::Implicit); } else { const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC)); BuildMI(*LoopBB, InsPt, DL, MovRelDesc) .addReg(Dst, RegState::Define) .addReg(PhiReg) - .addOperand(*Val) + .add(*Val) 
.addImm(SubReg - AMDGPU::sub0); } @@ -1735,15 +1734,15 @@ case AMDGPU::SI_INIT_M0: { BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) - .addOperand(MI.getOperand(0)); + .add(MI.getOperand(0)); MI.eraseFromParent(); return BB; } case AMDGPU::GET_GROUPSTATICSIZE: { DebugLoc DL = MI.getDebugLoc(); BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) - .addOperand(MI.getOperand(0)) - .addImm(MFI->getLDSSize()); + .add(MI.getOperand(0)) + .addImm(MFI->getLDSSize()); MI.eraseFromParent(); return BB; } @@ -1794,7 +1793,7 @@ const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); const DebugLoc &DL = MI.getDebugLoc(); MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) - .addOperand(MI.getOperand(0)); + .add(MI.getOperand(0)); Br->getOperand(1).setIsUndef(true); // read undef SCC MI.eraseFromParent(); return BB; Index: lib/Target/AMDGPU/SIInsertSkips.cpp =================================================================== --- lib/Target/AMDGPU/SIInsertSkips.cpp +++ lib/Target/AMDGPU/SIInsertSkips.cpp @@ -195,8 +195,8 @@ } } else { BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32)) - .addImm(0) - .addOperand(Op); + .addImm(0) + .add(Op); } } Index: lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.cpp +++ lib/Target/AMDGPU/SIInstrInfo.cpp @@ -870,9 +870,10 @@ MachineInstr *MovRel = BuildMI(MBB, MI, DL, MovRelDesc) .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) - .addOperand(MI.getOperand(2)) + .add(MI.getOperand(2)) .addReg(VecReg, RegState::ImplicitDefine) - .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0)); + .addReg(VecReg, + RegState::Implicit | (IsUndef ? RegState::Undef : 0)); const int ImpDefIdx = MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses(); @@ -897,14 +898,14 @@ // constant data. 
Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) .addReg(RegLo) - .addOperand(MI.getOperand(1))); + .add(MI.getOperand(1))); MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) .addReg(RegHi); if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE) MIB.addImm(0); else - MIB.addOperand(MI.getOperand(2)); + MIB.add(MI.getOperand(2)); Bundler.append(MIB); llvm::finalizeBundle(MBB, Bundler.begin()); @@ -1638,13 +1639,13 @@ return BuildMI(*MBB, MI, MI.getDebugLoc(), get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32)) - .addOperand(*Dst) + .add(*Dst) .addImm(0) // Src0 mods - .addOperand(*Src0) + .add(*Src0) .addImm(0) // Src1 mods - .addOperand(*Src1) + .add(*Src1) .addImm(0) // Src mods - .addOperand(*Src2) + .add(*Src2) .addImm(0) // clamp .addImm(0); // omod } @@ -2238,7 +2239,7 @@ unsigned Reg = MRI.createVirtualRegister(VRC); DebugLoc DL = MBB->findDebugLoc(I); - BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO); + BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); MO.ChangeToRegister(Reg, false); } @@ -2564,8 +2565,8 @@ return; unsigned DstReg = MRI.createVirtualRegister(DstRC); - MachineInstr *Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg) - .addOperand(Op); + MachineInstr *Copy = + BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); Op.setReg(DstReg); Op.setSubReg(0); @@ -2810,13 +2811,13 @@ // Regular buffer load / store. MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) - .addOperand(*VData) + .add(*VData) .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. // This will be replaced later // with the new value of vaddr. - .addOperand(*SRsrc) - .addOperand(*SOffset) - .addOperand(*Offset); + .add(*SRsrc) + .add(*SOffset) + .add(*Offset); // Atomics do not have this operand. if (const MachineOperand *GLC = @@ -2836,14 +2837,14 @@ } else { // Atomics with return. 
Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) - .addOperand(*VData) - .addOperand(*VDataIn) + .add(*VData) + .add(*VDataIn) .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. // This will be replaced later // with the new value of vaddr. - .addOperand(*SRsrc) - .addOperand(*SOffset) - .addOperand(*Offset) + .add(*SRsrc) + .add(*SOffset) + .add(*Offset) .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); } @@ -3112,15 +3113,13 @@ const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); - BuildMI(MBB, MII, DL, InstDesc, DestSub0) - .addOperand(SrcReg0Sub0); + BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); - BuildMI(MBB, MII, DL, InstDesc, DestSub1) - .addOperand(SrcReg0Sub1); + BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) @@ -3174,8 +3173,8 @@ unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) - .addOperand(SrcReg0Sub0) - .addOperand(SrcReg1Sub0); + .add(SrcReg0Sub0) + .add(SrcReg1Sub0); MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); @@ -3184,8 +3183,8 @@ unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) - .addOperand(SrcReg0Sub1) - .addOperand(SrcReg1Sub1); + .add(SrcReg0Sub1) + .add(SrcReg1Sub1); unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) @@ -3231,13 +3230,9 @@ MachineOperand SrcRegSub1 = 
buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, AMDGPU::sub1, SrcSubRC); - BuildMI(MBB, MII, DL, InstDesc, MidReg) - .addOperand(SrcRegSub0) - .addImm(0); + BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); - BuildMI(MBB, MII, DL, InstDesc, ResultReg) - .addOperand(SrcRegSub1) - .addReg(MidReg); + BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); MRI.replaceRegWith(Dest.getReg(), ResultReg); Index: lib/Target/AMDGPU/SILoadStoreOptimizer.cpp =================================================================== --- lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -360,25 +360,24 @@ unsigned DestReg = MRI->createVirtualRegister(SuperRC); DebugLoc DL = I->getDebugLoc(); - MachineInstrBuilder Read2 - = BuildMI(*MBB, Paired, DL, Read2Desc, DestReg) - .addOperand(*AddrReg) // addr - .addImm(NewOffset0) // offset0 - .addImm(NewOffset1) // offset1 - .addImm(0) // gds - .addMemOperand(*I->memoperands_begin()) - .addMemOperand(*Paired->memoperands_begin()); + MachineInstrBuilder Read2 = BuildMI(*MBB, Paired, DL, Read2Desc, DestReg) + .add(*AddrReg) // addr + .addImm(NewOffset0) // offset0 + .addImm(NewOffset1) // offset1 + .addImm(0) // gds + .addMemOperand(*I->memoperands_begin()) + .addMemOperand(*Paired->memoperands_begin()); (void)Read2; const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY); // Copy to the old destination registers. BuildMI(*MBB, Paired, DL, CopyDesc) - .addOperand(*Dest0) // Copy to same destination including flags and sub reg. - .addReg(DestReg, 0, SubRegIdx0); + .add(*Dest0) // Copy to same destination including flags and sub reg. 
+ .addReg(DestReg, 0, SubRegIdx0); MachineInstr *Copy1 = BuildMI(*MBB, Paired, DL, CopyDesc) - .addOperand(*Dest1) - .addReg(DestReg, RegState::Kill, SubRegIdx1); + .add(*Dest1) + .addReg(DestReg, RegState::Kill, SubRegIdx1); moveInstsAfter(Copy1, InstsToMove); @@ -436,16 +435,15 @@ const MCInstrDesc &Write2Desc = TII->get(Opc); DebugLoc DL = I->getDebugLoc(); - MachineInstrBuilder Write2 - = BuildMI(*MBB, Paired, DL, Write2Desc) - .addOperand(*Addr) // addr - .addOperand(*Data0) // data0 - .addOperand(*Data1) // data1 - .addImm(NewOffset0) // offset0 - .addImm(NewOffset1) // offset1 - .addImm(0) // gds - .addMemOperand(*I->memoperands_begin()) - .addMemOperand(*Paired->memoperands_begin()); + MachineInstrBuilder Write2 = BuildMI(*MBB, Paired, DL, Write2Desc) + .add(*Addr) // addr + .add(*Data0) // data0 + .add(*Data1) // data1 + .addImm(NewOffset0) // offset0 + .addImm(NewOffset1) // offset1 + .addImm(0) // gds + .addMemOperand(*I->memoperands_begin()) + .addMemOperand(*Paired->memoperands_begin()); moveInstsAfter(Write2, InstsToMove); Index: lib/Target/AMDGPU/SILowerControlFlow.cpp =================================================================== --- lib/Target/AMDGPU/SILowerControlFlow.cpp +++ lib/Target/AMDGPU/SILowerControlFlow.cpp @@ -175,9 +175,8 @@ // Insert a pseudo terminator to help keep the verifier happy. This will also // be used later when inserting skips. - MachineInstr *NewBr = - BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH)) - .addOperand(MI.getOperand(2)); + MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH)) + .add(MI.getOperand(2)); if (!LIS) { MI.eraseFromParent(); @@ -221,7 +220,7 @@ // the src like it does. unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg) - .addOperand(MI.getOperand(1)); // Saved EXEC + .add(MI.getOperand(1)); // Saved EXEC // This must be inserted before phis and any spill code inserted before the // else. 
@@ -283,10 +282,9 @@ const DebugLoc &DL = MI.getDebugLoc(); unsigned Dst = MI.getOperand(0).getReg(); - MachineInstr *Or = - BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst) - .addReg(AMDGPU::EXEC) - .addOperand(MI.getOperand(1)); + MachineInstr *Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst) + .addReg(AMDGPU::EXEC) + .add(MI.getOperand(1)); if (LIS) LIS->ReplaceMachineInstrInMaps(MI, *Or); @@ -306,13 +304,13 @@ const DebugLoc &DL = MI.getDebugLoc(); MachineInstr *AndN2 = - BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) - .addOperand(MI.getOperand(0)); + BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC) + .addReg(AMDGPU::EXEC) + .add(MI.getOperand(0)); MachineInstr *Branch = - BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) - .addOperand(MI.getOperand(1)); + BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) + .add(MI.getOperand(1)); if (LIS) { LIS->ReplaceMachineInstrInMaps(MI, *AndN2); @@ -328,9 +326,9 @@ MachineBasicBlock::iterator InsPt = MBB.begin(); MachineInstr *NewMI = - BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) - .addOperand(MI.getOperand(0)); + BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC) + .addReg(AMDGPU::EXEC) + .add(MI.getOperand(0)); if (LIS) LIS->ReplaceMachineInstrInMaps(MI, *NewMI); Index: lib/Target/AMDGPU/SILowerI1Copies.cpp =================================================================== --- lib/Target/AMDGPU/SILowerI1Copies.cpp +++ lib/Target/AMDGPU/SILowerI1Copies.cpp @@ -114,18 +114,18 @@ assert(Val == 0 || Val == -1); BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_MOV_B32_e32)) - .addOperand(Dst) - .addImm(Val); + .add(Dst) + .addImm(Val); MI.eraseFromParent(); continue; } } BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64)) - .addOperand(Dst) - .addImm(0) - .addImm(-1) - .addOperand(Src); + .add(Dst) + .addImm(0) + .addImm(-1) + .add(Src); MI.eraseFromParent(); } 
else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) && SrcRC == &AMDGPU::VReg_1RegClass) { @@ -140,14 +140,14 @@ MRI.getRegClass(DefInst->getOperand(3).getReg()), &AMDGPU::SGPR_64RegClass)) { BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64)) - .addOperand(Dst) - .addReg(AMDGPU::EXEC) - .addOperand(DefInst->getOperand(3)); + .add(Dst) + .addReg(AMDGPU::EXEC) + .add(DefInst->getOperand(3)); } else { BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64)) - .addOperand(Dst) - .addOperand(Src) - .addImm(0); + .add(Dst) + .add(Src) + .addImm(0); } MI.eraseFromParent(); } Index: lib/Target/AMDGPU/SIRegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.cpp +++ lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -415,14 +415,14 @@ unsigned Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata)->getReg(); BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp)) - .addReg(Reg, getDefRegState(!IsStore)) - .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)) - .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)) - .addImm(Offset) - .addImm(0) // glc - .addImm(0) // slc - .addImm(0) // tfe - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addReg(Reg, getDefRegState(!IsStore)) + .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)) + .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)) + .addImm(Offset) + .addImm(0) // glc + .addImm(0) // slc + .addImm(0) // tfe + .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); return true; } Index: lib/Target/AMDGPU/SIShrinkInstructions.cpp =================================================================== --- lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -467,26 +467,26 @@ int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); if (Op32DstIdx != -1) { // dst - Inst32.addOperand(MI.getOperand(0)); + Inst32.add(MI.getOperand(0)); } else { 
assert(MI.getOperand(0).getReg() == AMDGPU::VCC && "Unexpected case"); } - Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0)); + Inst32.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0)); const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); if (Src1) - Inst32.addOperand(*Src1); + Inst32.add(*Src1); const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2); if (Src2) { int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); if (Op32Src2Idx != -1) { - Inst32.addOperand(*Src2); + Inst32.add(*Src2); } else { // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is // replaced with an implicit read of vcc. This was already added Index: lib/Target/ARM/ARMBaseInstrInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseInstrInfo.cpp +++ lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -437,14 +437,18 @@ else BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB); } else - BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB) - .addImm(Cond[0].getImm()).addOperand(Cond[1]); + BuildMI(&MBB, DL, get(BccOpc)) + .addMBB(TBB) + .addImm(Cond[0].getImm()) + .add(Cond[1]); return 1; } // Two-way conditional branch. - BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB) - .addImm(Cond[0].getImm()).addOperand(Cond[1]); + BuildMI(&MBB, DL, get(BccOpc)) + .addMBB(TBB) + .addImm(Cond[0].getImm()) + .add(Cond[1]); if (isThumb) BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0); else @@ -1279,7 +1283,7 @@ LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD : isThumb1 ? ARM::tLDMIA_UPD : ARM::LDMIA_UPD)) - .addOperand(MI->getOperand(1)); + .add(MI->getOperand(1)); } else { LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA)); } @@ -1288,13 +1292,13 @@ STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD : isThumb1 ? 
ARM::tSTMIA_UPD : ARM::STMIA_UPD)) - .addOperand(MI->getOperand(0)); + .add(MI->getOperand(0)); } else { STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA)); } - LDM.addOperand(MI->getOperand(3)).add(predOps(ARMCC::AL)); - STM.addOperand(MI->getOperand(2)).add(predOps(ARMCC::AL)); + LDM.add(MI->getOperand(3)).add(predOps(ARMCC::AL)); + STM.add(MI->getOperand(2)).add(predOps(ARMCC::AL)); // Sort the scratch registers into ascending order. const TargetRegisterInfo &TRI = getRegisterInfo(); @@ -1951,14 +1955,14 @@ const MCInstrDesc &DefDesc = DefMI->getDesc(); for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) - NewMI.addOperand(DefMI->getOperand(i)); + NewMI.add(DefMI->getOperand(i)); unsigned CondCode = MI.getOperand(3).getImm(); if (Invert) NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode))); else NewMI.addImm(CondCode); - NewMI.addOperand(MI.getOperand(4)); + NewMI.add(MI.getOperand(4)); // DefMI is not the -S version that sets CPSR, so add an optional %noreg. if (NewMI->hasOptionalDef()) @@ -1969,7 +1973,7 @@ // The tie makes the register allocator ensure the FalseReg is allocated the // same register as operand 0. FalseReg.setImplicit(); - NewMI.addOperand(FalseReg); + NewMI.add(FalseReg); NewMI->tieOperands(0, NewMI->getNumOperands() - 1); // Update SeenMIs set: register newly created MI and erase removed DefMI. @@ -2185,7 +2189,7 @@ // Add the complete list back in. MachineInstrBuilder MIB(MF, &*MI); for (int i = RegList.size() - 1; i >= 0; --i) - MIB.addOperand(RegList[i]); + MIB.add(RegList[i]); return true; } Index: lib/Target/ARM/ARMConstantIslandPass.cpp =================================================================== --- lib/Target/ARM/ARMConstantIslandPass.cpp +++ lib/Target/ARM/ARMConstantIslandPass.cpp @@ -1477,7 +1477,9 @@ // add it to the island. 
U.HighWaterMark = NewIsland; U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc()) - .addImm(ID).addOperand(CPEMI->getOperand(1)).addImm(Size); + .addImm(ID) + .add(CPEMI->getOperand(1)) + .addImm(Size); CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1)); ++NumCPEs; @@ -1709,8 +1711,8 @@ MI->getNumExplicitOperands() == 3) { // Create the new insn and copy the predicate from the old. BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET)) - .addOperand(MI->getOperand(0)) - .addOperand(MI->getOperand(1)); + .add(MI->getOperand(0)) + .add(MI->getOperand(1)); MI->eraseFromParent(); MadeChange = true; } Index: lib/Target/ARM/ARMExpandPseudoInsts.cpp =================================================================== --- lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -97,9 +97,9 @@ const MachineOperand &MO = OldMI.getOperand(i); assert(MO.isReg() && MO.getReg()); if (MO.isUse()) - UseMI.addOperand(MO); + UseMI.add(MO); else - DefMI.addOperand(MO); + DefMI.add(MO); } } @@ -415,14 +415,14 @@ MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead)); if (TableEntry->isUpdating) - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the addrmode6 operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the am6offset operand. if (TableEntry->hasWritebackOperand) - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // For an instruction writing double-spaced subregs, the pseudo instruction // has an extra operand that is a use of the super-register. Record the @@ -432,15 +432,15 @@ SrcOpIdx = OpIdx++; // Copy the predicate operands. 
- MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the super-register source operand used for double-spaced subregs over // to the new instruction as an implicit operand. if (SrcOpIdx != 0) { MachineOperand MO = MI.getOperand(SrcOpIdx); MO.setImplicit(true); - MIB.addOperand(MO); + MIB.add(MO); } // Add an implicit def for the super-register. MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead)); @@ -467,14 +467,14 @@ TII->get(TableEntry->RealOpc)); unsigned OpIdx = 0; if (TableEntry->isUpdating) - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the addrmode6 operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the am6offset operand. if (TableEntry->hasWritebackOperand) - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); bool SrcIsKill = MI.getOperand(OpIdx).isKill(); bool SrcIsUndef = MI.getOperand(OpIdx).isUndef(); @@ -490,8 +490,8 @@ MIB.addReg(D3, getUndefRegState(SrcIsUndef)); // Copy the predicate operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); if (SrcIsKill && !SrcIsUndef) // Add an implicit kill for the super-reg. MIB->addRegisterKilled(SrcReg, TRI, true); @@ -549,14 +549,14 @@ } if (TableEntry->isUpdating) - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the addrmode6 operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the am6offset operand. 
if (TableEntry->hasWritebackOperand) - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Grab the super-register source. MachineOperand MO = MI.getOperand(OpIdx++); @@ -579,12 +579,12 @@ OpIdx += 1; // Copy the predicate operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the super-register source to be an implicit source. MO.setImplicit(true); - MIB.addOperand(MO); + MIB.add(MO); if (TableEntry->IsLoad) // Add an implicit def for the super-register. MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead)); @@ -605,9 +605,9 @@ unsigned OpIdx = 0; // Transfer the destination register operand. - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); if (IsExt) - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); bool SrcIsKill = MI.getOperand(OpIdx).isKill(); unsigned SrcReg = MI.getOperand(OpIdx++).getReg(); @@ -616,11 +616,11 @@ MIB.addReg(D0); // Copy the other source register operand. - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the predicate operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Add an implicit kill and use for the super-reg. MIB.addReg(SrcReg, RegState::Implicit | getKillRegState(SrcIsKill)); @@ -819,7 +819,7 @@ unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr; BuildMI(LoadCmpBB, DL, TII->get(CMPrr)) .addReg(Dest.getReg(), getKillRegState(Dest.isDead())) - .addOperand(Desired) + .add(Desired) .add(predOps(ARMCC::AL)); unsigned Bcc = IsThumb ? 
ARM::tBcc : ARM::Bcc; BuildMI(LoadCmpBB, DL, TII->get(Bcc)) @@ -839,8 +839,8 @@ MIB = BuildMI(StoreBB, DL, TII->get(StrexOp), StatusReg); - MIB.addOperand(New); - MIB.addOperand(Addr); + MIB.add(New); + MIB.add(Addr); if (StrexOp == ARM::t2STREX) MIB.addImm(0); // a 32-bit Thumb strex (only) allows an offset. MIB.add(predOps(ARMCC::AL)); @@ -961,7 +961,7 @@ unsigned STREXD = IsThumb ? ARM::t2STREXD : ARM::STREXD; MIB = BuildMI(StoreBB, DL, TII->get(STREXD), StatusReg); addExclusiveRegPair(MIB, New, 0, IsThumb, TRI); - MIB.addOperand(Addr).add(predOps(ARMCC::AL)); + MIB.add(Addr).add(predOps(ARMCC::AL)); unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri; BuildMI(StoreBB, DL, TII->get(CMPri)) @@ -1049,9 +1049,9 @@ unsigned newOpc = Opcode == ARM::VMOVScc ? ARM::VMOVS : ARM::VMOVD; BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(newOpc), MI.getOperand(1).getReg()) - .addOperand(MI.getOperand(2)) - .addImm(MI.getOperand(3).getImm()) // 'pred' - .addOperand(MI.getOperand(4)); + .add(MI.getOperand(2)) + .addImm(MI.getOperand(3).getImm()) // 'pred' + .add(MI.getOperand(4)); MI.eraseFromParent(); return true; @@ -1061,10 +1061,10 @@ unsigned Opc = AFI->isThumbFunction() ? 
ARM::t2MOVr : ARM::MOVr; BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc), MI.getOperand(1).getReg()) - .addOperand(MI.getOperand(2)) - .addImm(MI.getOperand(3).getImm()) // 'pred' - .addOperand(MI.getOperand(4)) - .addReg(0); // 's' bit + .add(MI.getOperand(2)) + .addImm(MI.getOperand(3).getImm()) // 'pred' + .add(MI.getOperand(4)) + .addReg(0); // 's' bit MI.eraseFromParent(); return true; @@ -1072,11 +1072,11 @@ case ARM::MOVCCsi: { BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi), (MI.getOperand(1).getReg())) - .addOperand(MI.getOperand(2)) - .addImm(MI.getOperand(3).getImm()) - .addImm(MI.getOperand(4).getImm()) // 'pred' - .addOperand(MI.getOperand(5)) - .addReg(0); // 's' bit + .add(MI.getOperand(2)) + .addImm(MI.getOperand(3).getImm()) + .addImm(MI.getOperand(4).getImm()) // 'pred' + .add(MI.getOperand(5)) + .addReg(0); // 's' bit MI.eraseFromParent(); return true; @@ -1084,12 +1084,12 @@ case ARM::MOVCCsr: { BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsr), (MI.getOperand(1).getReg())) - .addOperand(MI.getOperand(2)) - .addOperand(MI.getOperand(3)) - .addImm(MI.getOperand(4).getImm()) - .addImm(MI.getOperand(5).getImm()) // 'pred' - .addOperand(MI.getOperand(6)) - .addReg(0); // 's' bit + .add(MI.getOperand(2)) + .add(MI.getOperand(3)) + .addImm(MI.getOperand(4).getImm()) + .addImm(MI.getOperand(5).getImm()) // 'pred' + .add(MI.getOperand(6)) + .addReg(0); // 's' bit MI.eraseFromParent(); return true; @@ -1099,9 +1099,9 @@ unsigned NewOpc = AFI->isThumbFunction() ? ARM::t2MOVi16 : ARM::MOVi16; BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc), MI.getOperand(1).getReg()) - .addImm(MI.getOperand(2).getImm()) - .addImm(MI.getOperand(3).getImm()) // 'pred' - .addOperand(MI.getOperand(4)); + .addImm(MI.getOperand(2).getImm()) + .addImm(MI.getOperand(3).getImm()) // 'pred' + .add(MI.getOperand(4)); MI.eraseFromParent(); return true; } @@ -1110,10 +1110,10 @@ unsigned Opc = AFI->isThumbFunction() ? 
ARM::t2MOVi : ARM::MOVi; BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc), MI.getOperand(1).getReg()) - .addImm(MI.getOperand(2).getImm()) - .addImm(MI.getOperand(3).getImm()) // 'pred' - .addOperand(MI.getOperand(4)) - .addReg(0); // 's' bit + .addImm(MI.getOperand(2).getImm()) + .addImm(MI.getOperand(3).getImm()) // 'pred' + .add(MI.getOperand(4)) + .addReg(0); // 's' bit MI.eraseFromParent(); return true; @@ -1123,10 +1123,10 @@ unsigned Opc = AFI->isThumbFunction() ? ARM::t2MVNi : ARM::MVNi; BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc), MI.getOperand(1).getReg()) - .addImm(MI.getOperand(2).getImm()) - .addImm(MI.getOperand(3).getImm()) // 'pred' - .addOperand(MI.getOperand(4)) - .addReg(0); // 's' bit + .addImm(MI.getOperand(2).getImm()) + .addImm(MI.getOperand(3).getImm()) // 'pred' + .add(MI.getOperand(4)) + .addReg(0); // 's' bit MI.eraseFromParent(); return true; @@ -1145,11 +1145,11 @@ } BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc), MI.getOperand(1).getReg()) - .addOperand(MI.getOperand(2)) - .addImm(MI.getOperand(3).getImm()) - .addImm(MI.getOperand(4).getImm()) // 'pred' - .addOperand(MI.getOperand(5)) - .addReg(0); // 's' bit + .add(MI.getOperand(2)) + .addImm(MI.getOperand(3).getImm()) + .addImm(MI.getOperand(4).getImm()) // 'pred' + .add(MI.getOperand(5)) + .addReg(0); // 's' bit MI.eraseFromParent(); return true; } @@ -1206,7 +1206,7 @@ // These are just fancy MOVs instructions. BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi), MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(ARM_AM::getSORegOpc( (Opcode == ARM::MOVsrl_flag ? 
ARM_AM::lsr : ARM_AM::asr), 1)) .add(predOps(ARMCC::AL)) @@ -1219,7 +1219,7 @@ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi), MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0)) .add(predOps(ARMCC::AL)) .addReg(0); @@ -1253,14 +1253,14 @@ bool DstIsDead = MI.getOperand(0).isDead(); MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .add(predOps(ARMCC::AL)); MIB1->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(ARM::tPICADD)) - .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead)) - .addReg(DstReg) - .addOperand(MI.getOperand(2)); + MachineInstrBuilder MIB2 = + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD)) + .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstReg) + .add(MI.getOperand(2)); TransferImpOps(MI, MIB1, MIB2); MI.eraseFromParent(); return true; @@ -1372,9 +1372,9 @@ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri), ARM::PC) .addReg(ARM::LR) - .addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) - .addOperand(MI.getOperand(2)) + .add(MI.getOperand(0)) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)) .addReg(ARM::CPSR, RegState::Undef); TransferImpOps(MI, MIB, MIB); MI.eraseFromParent(); @@ -1391,11 +1391,11 @@ unsigned DstReg = MI.getOperand(OpIdx++).getReg(); // Copy the source register. - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the predicate operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Add the destination operands (D subregs). 
unsigned D0 = TRI->getSubReg(DstReg, ARM::dsub_0); @@ -1422,11 +1422,11 @@ unsigned SrcReg = MI.getOperand(OpIdx++).getReg(); // Copy the destination register. - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the predicate operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Add the source operands (D subregs). unsigned D0 = TRI->getSubReg(SrcReg, ARM::dsub_0); Index: lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- lib/Target/ARM/ARMISelLowering.cpp +++ lib/Target/ARM/ARMISelLowering.cpp @@ -8799,11 +8799,11 @@ // Thumb1 post-indexed loads are really just single-register LDMs. case ARM::tLDR_postidx: { BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) - .addOperand(MI.getOperand(1)) // Rn_wb - .addOperand(MI.getOperand(2)) // Rn - .addOperand(MI.getOperand(3)) // PredImm - .addOperand(MI.getOperand(4)) // PredReg - .addOperand(MI.getOperand(0)); // Rt + .add(MI.getOperand(1)) // Rn_wb + .add(MI.getOperand(2)) // Rn + .add(MI.getOperand(3)) // PredImm + .add(MI.getOperand(4)) // PredReg + .add(MI.getOperand(0)); // Rt MI.eraseFromParent(); return BB; } @@ -8834,12 +8834,12 @@ MachineMemOperand *MMO = *MI.memoperands_begin(); BuildMI(*BB, MI, dl, TII->get(NewOpc)) - .addOperand(MI.getOperand(0)) // Rn_wb - .addOperand(MI.getOperand(1)) // Rt - .addOperand(MI.getOperand(2)) // Rn - .addImm(Offset) // offset (skip GPR==zero_reg) - .addOperand(MI.getOperand(5)) // pred - .addOperand(MI.getOperand(6)) + .add(MI.getOperand(0)) // Rn_wb + .add(MI.getOperand(1)) // Rt + .add(MI.getOperand(2)) // Rn + .addImm(Offset) // offset (skip GPR==zero_reg) + .add(MI.getOperand(5)) // pred + .add(MI.getOperand(6)) .addMemOperand(MMO); MI.eraseFromParent(); return BB; @@ -8856,7 +8856,7 @@ } MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); for (unsigned i = 0; i < 
MI.getNumOperands(); ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); MI.eraseFromParent(); return BB; } Index: lib/Target/ARM/ARMLoadStoreOptimizer.cpp =================================================================== --- lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1259,7 +1259,7 @@ // Transfer the rest of operands. for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum) - MIB.addOperand(MI->getOperand(OpNum)); + MIB.add(MI->getOperand(OpNum)); // Transfer memoperands. MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); @@ -1462,12 +1462,10 @@ DebugLoc DL = MI.getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc)); if (NewOpc == ARM::t2LDRD_PRE || NewOpc == ARM::t2LDRD_POST) { - MIB.addOperand(Reg0Op).addOperand(Reg1Op) - .addReg(BaseOp.getReg(), RegState::Define); + MIB.add(Reg0Op).add(Reg1Op).addReg(BaseOp.getReg(), RegState::Define); } else { assert(NewOpc == ARM::t2STRD_PRE || NewOpc == ARM::t2STRD_POST); - MIB.addReg(BaseOp.getReg(), RegState::Define) - .addOperand(Reg0Op).addOperand(Reg1Op); + MIB.addReg(BaseOp.getReg(), RegState::Define).add(Reg0Op).add(Reg1Op); } MIB.addReg(BaseOp.getReg(), RegState::Kill) .addImm(Offset).addImm(Pred).addReg(PredReg); @@ -1477,7 +1475,7 @@ // Transfer implicit operands. for (const MachineOperand &MO : MI.implicit_operands()) - MIB.addOperand(MO); + MIB.add(MO); MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); MBB.erase(MBBI); Index: lib/Target/ARM/Thumb1FrameLowering.cpp =================================================================== --- lib/Target/ARM/Thumb1FrameLowering.cpp +++ lib/Target/ARM/Thumb1FrameLowering.cpp @@ -501,7 +501,7 @@ // Copy implicit ops and popped registers, if any. 
for (auto MO: MBBI->operands()) if (MO.isReg() && (MO.isImplicit() || MO.isDef())) - MIB.addOperand(MO); + MIB.add(MO); MIB.addReg(ARM::PC, RegState::Define); // Erase the old instruction (tBX_RET or tPOP). MBB.erase(MBBI); @@ -585,7 +585,7 @@ for (auto MO: MBBI->operands()) if (MO.isReg() && (MO.isImplicit() || MO.isDef()) && MO.getReg() != ARM::PC) { - MIB.addOperand(MO); + MIB.add(MO); if (!MO.isImplicit()) Popped = true; } Index: lib/Target/ARM/Thumb2SizeReduction.cpp =================================================================== --- lib/Target/ARM/Thumb2SizeReduction.cpp +++ lib/Target/ARM/Thumb2SizeReduction.cpp @@ -562,8 +562,8 @@ MIB.addReg(MI->getOperand(0).getReg(), RegState::Define | RegState::Dead); if (!isLdStMul) { - MIB.addOperand(MI->getOperand(0)); - MIB.addOperand(MI->getOperand(1)); + MIB.add(MI->getOperand(0)); + MIB.add(MI->getOperand(1)); if (HasImmOffset) MIB.addImm(OffsetImm / Scale); @@ -577,7 +577,7 @@ // Transfer the rest of operands. for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum) - MIB.addOperand(MI->getOperand(OpNum)); + MIB.add(MI->getOperand(OpNum)); // Transfer memoperands. MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); @@ -624,8 +624,8 @@ MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), TII->get(ARM::tADDrSPi)) - .addOperand(MI->getOperand(0)) - .addOperand(MI->getOperand(1)) + .add(MI->getOperand(0)) + .add(MI->getOperand(1)) .addImm(Imm / 4) // The tADDrSPi has an implied scale by four. .add(predOps(ARMCC::AL)); @@ -786,7 +786,7 @@ // Add the 16-bit instruction. DebugLoc dl = MI->getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); - MIB.addOperand(MI->getOperand(0)); + MIB.add(MI->getOperand(0)); if (NewMCID.hasOptionalDef()) { if (HasCC) AddDefaultT1CC(MIB, CCDead); @@ -801,7 +801,7 @@ continue; if (SkipPred && MCID.OpInfo[i].isPredicate()) continue; - MIB.addOperand(MI->getOperand(i)); + MIB.add(MI->getOperand(i)); } // Transfer MI flags. 
@@ -881,7 +881,7 @@ // Add the 16-bit instruction. DebugLoc dl = MI->getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); - MIB.addOperand(MI->getOperand(0)); + MIB.add(MI->getOperand(0)); if (NewMCID.hasOptionalDef()) { if (HasCC) AddDefaultT1CC(MIB, CCDead); @@ -910,7 +910,7 @@ // Skip implicit def of CPSR. Either it's modeled as an optional // def now or it's already an implicit def on the new instruction. continue; - MIB.addOperand(MO); + MIB.add(MO); } if (!MCID.isPredicable() && NewMCID.isPredicable()) MIB.add(predOps(ARMCC::AL)); Index: lib/Target/AVR/AVRExpandPseudoInsts.cpp =================================================================== --- lib/Target/AVR/AVRExpandPseudoInsts.cpp +++ lib/Target/AVR/AVRExpandPseudoInsts.cpp @@ -509,8 +509,8 @@ const BlockAddress *BA = MI.getOperand(1).getBlockAddress(); unsigned TF = MI.getOperand(1).getTargetFlags(); - MIBLO.addOperand(MachineOperand::CreateBA(BA, TF | AVRII::MO_LO)); - MIBHI.addOperand(MachineOperand::CreateBA(BA, TF | AVRII::MO_HI)); + MIBLO.add(MachineOperand::CreateBA(BA, TF | AVRII::MO_LO)); + MIBHI.add(MachineOperand::CreateBA(BA, TF | AVRII::MO_HI)); break; } case MachineOperand::MO_Immediate: { @@ -785,9 +785,8 @@ auto Op1 = MI.getOperand(0); auto Op2 = MI.getOperand(1); - MachineInstr &NewInst = *buildMI(MBB, MBBI, Opcode) - .addOperand(Op1).addOperand(Op2) - .getInstr(); + MachineInstr &NewInst = + *buildMI(MBB, MBBI, Opcode).add(Op1).add(Op2).getInstr(); f(NewInst); }); } @@ -810,15 +809,13 @@ unsigned StoreOpcode = (Width == 8) ? 
AVR::STPtrRr : AVR::STWPtrRr; // Create the load - buildMI(MBB, MBBI, LoadOpcode).addOperand(Op1).addOperand(Op2); + buildMI(MBB, MBBI, LoadOpcode).add(Op1).add(Op2); // Create the arithmetic op - buildMI(MBB, MBBI, ArithOpcode) - .addOperand(Op1).addOperand(Op1) - .addOperand(Op2); + buildMI(MBB, MBBI, ArithOpcode).add(Op1).add(Op1).add(Op2); // Create the store - buildMI(MBB, MBBI, StoreOpcode).addOperand(Op2).addOperand(Op1); + buildMI(MBB, MBBI, StoreOpcode).add(Op2).add(Op1); }); } Index: lib/Target/Hexagon/HexagonBitSimplify.cpp =================================================================== --- lib/Target/Hexagon/HexagonBitSimplify.cpp +++ lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -2599,7 +2599,7 @@ for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) { const MachineOperand &Op = SI->getOperand(j); if (!Op.isReg()) { - MIB.addOperand(Op); + MIB.add(Op); continue; } if (!Op.isUse()) Index: lib/Target/Hexagon/HexagonEarlyIfConv.cpp =================================================================== --- lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -680,12 +680,12 @@ MachineInstrBuilder MIB = BuildMI(*ToB, At, DL, HII->get(COpc)); MachineInstr::mop_iterator MOI = MI->operands_begin(); if (HII->isPostIncrement(*MI)) { - MIB.addOperand(*MOI); + MIB.add(*MOI); ++MOI; } MIB.addReg(PredR); for (const MachineOperand &MO : make_range(MOI, MI->operands_end())) - MIB.addOperand(MO); + MIB.add(MO); // Set memory references. 
MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); Index: lib/Target/Hexagon/HexagonExpandCondsets.cpp =================================================================== --- lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -595,9 +595,9 @@ .addReg(SrcOp.getReg(), SrcState, SrcOp.getSubReg()); } else { MIB = BuildMI(B, At, DL, HII->get(Opc)) - .addReg(DstR, DstState, DstSR) - .addReg(PredOp.getReg(), PredState, PredOp.getSubReg()) - .addOperand(SrcOp); + .addReg(DstR, DstState, DstSR) + .addReg(PredOp.getReg(), PredState, PredOp.getSubReg()) + .add(SrcOp); } DEBUG(dbgs() << "created an initial copy: " << *MIB); @@ -828,7 +828,7 @@ while (Ox < NP) { MachineOperand &MO = MI.getOperand(Ox); if (!MO.isReg() || !MO.isImplicit()) - MB.addOperand(MO); + MB.add(MO); Ox++; } Index: lib/Target/Hexagon/HexagonFixupHwLoops.cpp =================================================================== --- lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -190,5 +190,5 @@ MIB = BuildMI(*MBB, MII, DL, TII->get(newOp)); for (unsigned i = 0; i < MII->getNumOperands(); ++i) - MIB.addOperand(MII->getOperand(i)); + MIB.add(MII->getOperand(i)); } Index: lib/Target/Hexagon/HexagonFrameLowering.cpp =================================================================== --- lib/Target/Hexagon/HexagonFrameLowering.cpp +++ lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1473,8 +1473,7 @@ return false; unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); - BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR) - .addOperand(MI->getOperand(1)); + BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1)); BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR) .addReg(TmpR, RegState::Kill); @@ -2221,7 +2220,7 @@ if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) { const DebugLoc &DL = SI.getDebugLoc(); CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR) - 
.addOperand(SrcOp); + .add(SrcOp); } ++StartIt; Index: lib/Target/Hexagon/HexagonGenMux.cpp =================================================================== --- lib/Target/Hexagon/HexagonGenMux.cpp +++ lib/Target/Hexagon/HexagonGenMux.cpp @@ -324,9 +324,9 @@ if (!MxOpc) continue; BuildMI(B, MX.At, DL, HII->get(MxOpc), MX.DefR) - .addReg(MX.PredR) - .addOperand(*MX.SrcT) - .addOperand(*MX.SrcF); + .addReg(MX.PredR) + .add(*MX.SrcT) + .add(*MX.SrcF); B.erase(MX.Def1); B.erase(MX.Def2); Changed = true; Index: lib/Target/Hexagon/HexagonInstrInfo.cpp =================================================================== --- lib/Target/Hexagon/HexagonInstrInfo.cpp +++ lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -1074,13 +1074,13 @@ unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6; MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addImm(MI.getOperand(1).getImm()) .addReg(SrcSubLo) .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); MI1New->getOperand(0).setIsKill(false); BuildMI(MBB, MI, DL, get(NewOpc)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) // The Vectors are indexed in multiples of vector size. .addImm(MI.getOperand(1).getImm() + Offset) .addReg(SrcSubHi) @@ -1106,15 +1106,13 @@ unsigned DstReg = MI.getOperand(0).getReg(); unsigned Offset = Is128B ? 
VecOffset << 7 : VecOffset << 6; - MachineInstr *MI1New = - BuildMI(MBB, MI, DL, get(NewOpc), - HRI.getSubReg(DstReg, Hexagon::vsub_lo)) - .addOperand(MI.getOperand(1)) - .addImm(MI.getOperand(2).getImm()); + MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc), + HRI.getSubReg(DstReg, Hexagon::vsub_lo)) + .add(MI.getOperand(1)) + .addImm(MI.getOperand(2).getImm()); MI1New->getOperand(1).setIsKill(false); - BuildMI(MBB, MI, DL, get(NewOpc), - HRI.getSubReg(DstReg, Hexagon::vsub_hi)) - .addOperand(MI.getOperand(1)) + BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi)) + .add(MI.getOperand(1)) // The Vectors are indexed in multiples of vector size. .addImm(MI.getOperand(2).getImm() + Offset) .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); @@ -1227,18 +1225,18 @@ bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg()); if (Op0.getReg() != Op2.getReg()) { auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov)) - .addOperand(Op0) - .addOperand(Op1) - .addOperand(Op2); + .add(Op0) + .add(Op1) + .add(Op2); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); IsDestLive = true; } if (Op0.getReg() != Op3.getReg()) { auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov)) - .addOperand(Op0) - .addOperand(Op1) - .addOperand(Op3); + .add(Op0) + .add(Op1) + .add(Op3); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); } @@ -1259,10 +1257,10 @@ unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo); unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi); auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine)) - .addOperand(Op0) - .addOperand(Op1) - .addReg(SrcHi) - .addReg(SrcLo); + .add(Op0) + .add(Op1) + .addReg(SrcHi) + .addReg(SrcLo); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); IsDestLive = true; @@ -1271,10 +1269,10 @@ unsigned SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo); unsigned SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi); auto T = BuildMI(MBB, MI, DL, 
get(Hexagon::V6_vnccombine)) - .addOperand(Op0) - .addOperand(Op1) - .addReg(SrcHi) - .addReg(SrcLo); + .add(Op0) + .add(Op1) + .addReg(SrcHi) + .addReg(SrcLo); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); } @@ -1376,7 +1374,7 @@ MachineOperand &Op = MI.getOperand(NOp); if (!Op.isReg() || !Op.isDef() || Op.isImplicit()) break; - T.addOperand(Op); + T.add(Op); NOp++; } @@ -1386,7 +1384,7 @@ assert(GotPredReg); T.addReg(PredReg, PredRegFlags); while (NOp < NumOps) - T.addOperand(MI.getOperand(NOp++)); + T.add(MI.getOperand(NOp++)); MI.setDesc(get(PredOpc)); while (unsigned n = MI.getNumOperands()) Index: lib/Target/Hexagon/HexagonOptAddrMode.cpp =================================================================== --- lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -333,17 +333,17 @@ short NewOpCode = HII->getBaseWithLongOffset(*OldMI); assert(NewOpCode >= 0 && "Invalid New opcode\n"); MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode)); - MIB.addOperand(OldMI->getOperand(0)); - MIB.addOperand(OldMI->getOperand(2)); - MIB.addOperand(OldMI->getOperand(3)); - MIB.addOperand(ImmOp); + MIB.add(OldMI->getOperand(0)); + MIB.add(OldMI->getOperand(2)); + MIB.add(OldMI->getOperand(3)); + MIB.add(ImmOp); OpStart = 4; Changed = true; } else if (HII->getAddrMode(*OldMI) == HexagonII::BaseImmOffset) { short NewOpCode = HII->getAbsoluteForm(*OldMI); assert(NewOpCode >= 0 && "Invalid New opcode\n"); MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode)) - .addOperand(OldMI->getOperand(0)); + .add(OldMI->getOperand(0)); const GlobalValue *GV = ImmOp.getGlobal(); int64_t Offset = ImmOp.getOffset() + OldMI->getOperand(2).getImm(); @@ -359,9 +359,9 @@ short NewOpCode = HII->xformRegToImmOffset(*OldMI); assert(NewOpCode >= 0 && "Invalid New opcode\n"); MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode)); - MIB.addOperand(OldMI->getOperand(0)); - 
MIB.addOperand(OldMI->getOperand(1)); - MIB.addOperand(ImmOp); + MIB.add(OldMI->getOperand(0)); + MIB.add(OldMI->getOperand(1)); + MIB.add(ImmOp); OpStart = 4; Changed = true; DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n"); @@ -370,7 +370,7 @@ if (Changed) for (unsigned i = OpStart; i < OpEnd; ++i) - MIB.addOperand(OldMI->getOperand(i)); + MIB.add(OldMI->getOperand(i)); return Changed; } @@ -390,10 +390,10 @@ short NewOpCode = HII->getBaseWithLongOffset(*OldMI); assert(NewOpCode >= 0 && "Invalid New opcode\n"); MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode)); - MIB.addOperand(OldMI->getOperand(1)); - MIB.addOperand(OldMI->getOperand(2)); - MIB.addOperand(ImmOp); - MIB.addOperand(OldMI->getOperand(3)); + MIB.add(OldMI->getOperand(1)); + MIB.add(OldMI->getOperand(2)); + MIB.add(ImmOp); + MIB.add(OldMI->getOperand(3)); OpStart = 4; } else if (HII->getAddrMode(*OldMI) == HexagonII::BaseImmOffset) { short NewOpCode = HII->getAbsoluteForm(*OldMI); @@ -402,7 +402,7 @@ const GlobalValue *GV = ImmOp.getGlobal(); int64_t Offset = ImmOp.getOffset() + OldMI->getOperand(1).getImm(); MIB.addGlobalAddress(GV, Offset, ImmOp.getTargetFlags()); - MIB.addOperand(OldMI->getOperand(2)); + MIB.add(OldMI->getOperand(2)); OpStart = 3; } Changed = true; @@ -412,9 +412,9 @@ short NewOpCode = HII->xformRegToImmOffset(*OldMI); assert(NewOpCode >= 0 && "Invalid New opcode\n"); MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode)); - MIB.addOperand(OldMI->getOperand(0)); - MIB.addOperand(ImmOp); - MIB.addOperand(OldMI->getOperand(1)); + MIB.add(OldMI->getOperand(0)); + MIB.add(ImmOp); + MIB.add(OldMI->getOperand(1)); OpStart = 2; Changed = true; DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n"); @@ -422,7 +422,7 @@ } if (Changed) for (unsigned i = OpStart; i < OpEnd; ++i) - MIB.addOperand(OldMI->getOperand(i)); + MIB.add(OldMI->getOperand(i)); return Changed; } @@ -473,26 +473,26 @@ BuildMI(*BB, InsertPt, UseMI->getDebugLoc(), HII->get(NewOpCode)); 
// change mem(Rs + # ) -> mem(Rt << # + ##) if (UseMID.mayLoad()) { - MIB.addOperand(UseMI->getOperand(0)); - MIB.addOperand(AddAslMI->getOperand(2)); - MIB.addOperand(AddAslMI->getOperand(3)); + MIB.add(UseMI->getOperand(0)); + MIB.add(AddAslMI->getOperand(2)); + MIB.add(AddAslMI->getOperand(3)); const GlobalValue *GV = ImmOp.getGlobal(); MIB.addGlobalAddress(GV, UseMI->getOperand(2).getImm(), ImmOp.getTargetFlags()); OpStart = 3; } else if (UseMID.mayStore()) { - MIB.addOperand(AddAslMI->getOperand(2)); - MIB.addOperand(AddAslMI->getOperand(3)); + MIB.add(AddAslMI->getOperand(2)); + MIB.add(AddAslMI->getOperand(3)); const GlobalValue *GV = ImmOp.getGlobal(); MIB.addGlobalAddress(GV, UseMI->getOperand(1).getImm(), ImmOp.getTargetFlags()); - MIB.addOperand(UseMI->getOperand(2)); + MIB.add(UseMI->getOperand(2)); OpStart = 3; } else llvm_unreachable("Unhandled instruction"); for (unsigned i = OpStart; i < OpEnd; ++i) - MIB.addOperand(UseMI->getOperand(i)); + MIB.add(UseMI->getOperand(i)); Deleted.insert(UseMI); } Index: lib/Target/Lanai/LanaiInstrInfo.cpp =================================================================== --- lib/Target/Lanai/LanaiInstrInfo.cpp +++ lib/Target/Lanai/LanaiInstrInfo.cpp @@ -518,7 +518,7 @@ const MCInstrDesc &DefDesc = DefMI->getDesc(); for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) - NewMI.addOperand(DefMI->getOperand(i)); + NewMI.add(DefMI->getOperand(i)); unsigned CondCode = MI.getOperand(3).getImm(); if (Invert) @@ -531,7 +531,7 @@ // register operand tied to the first def. The tie makes the register // allocator ensure the FalseReg is allocated the same register as operand 0. FalseReg.setImplicit(); - NewMI.addOperand(FalseReg); + NewMI.add(FalseReg); NewMI->tieOperands(0, NewMI->getNumOperands() - 1); // Update SeenMIs set: register newly created MI and erase removed DefMI. 
Index: lib/Target/MSP430/MSP430BranchSelector.cpp =================================================================== --- lib/Target/MSP430/MSP430BranchSelector.cpp +++ lib/Target/MSP430/MSP430BranchSelector.cpp @@ -194,8 +194,8 @@ // Jump over the long branch on the opposite condition TII->reverseBranchCondition(Cond); MI = BuildMI(*MBB, MI, dl, TII->get(MSP430::JCC)) - .addMBB(NextMBB) - .addOperand(Cond[0]); + .addMBB(NextMBB) + .add(Cond[0]); InstrSizeDiff += TII->getInstSizeInBytes(*MI); ++MI; } Index: lib/Target/Mips/MipsInstrInfo.cpp =================================================================== --- lib/Target/Mips/MipsInstrInfo.cpp +++ lib/Target/Mips/MipsInstrInfo.cpp @@ -482,7 +482,7 @@ MIB->RemoveOperand(0); for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) { - MIB.addOperand(I->getOperand(J)); + MIB.add(I->getOperand(J)); } MIB.addImm(0); @@ -492,7 +492,7 @@ if (BranchWithZeroOperand && (unsigned)ZeroOperandPosition == J) continue; - MIB.addOperand(I->getOperand(J)); + MIB.add(I->getOperand(J)); } } Index: lib/Target/Mips/MipsSEISelLowering.cpp =================================================================== --- lib/Target/Mips/MipsSEISelLowering.cpp +++ lib/Target/Mips/MipsSEISelLowering.cpp @@ -3504,7 +3504,7 @@ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? 
Mips::LH : Mips::LH64), Rt); for (unsigned i = 1; i < MI.getNumOperands(); i++) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); BuildMI(*BB, MI, DL, TII->get(Mips::FILL_H), Wd).addReg(Rt); Index: lib/Target/NVPTX/NVPTXPeephole.cpp =================================================================== --- lib/Target/NVPTX/NVPTXPeephole.cpp +++ lib/Target/NVPTX/NVPTXPeephole.cpp @@ -113,7 +113,7 @@ BuildMI(MF, Root.getDebugLoc(), TII->get(Prev.getOpcode()), Root.getOperand(0).getReg()) .addReg(NVPTX::VRFrameLocal) - .addOperand(Prev.getOperand(2)); + .add(Prev.getOperand(2)); MBB.insert((MachineBasicBlock::iterator)&Root, MIB); Index: lib/Target/PowerPC/PPCInstrInfo.cpp =================================================================== --- lib/Target/PowerPC/PPCInstrInfo.cpp +++ lib/Target/PowerPC/PPCInstrInfo.cpp @@ -662,12 +662,14 @@ (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) : (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB); else if (Cond[0].getImm() == PPC::PRED_BIT_SET) - BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB); + BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB); else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET) - BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB); + BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB); else // Conditional branch BuildMI(&MBB, DL, get(PPC::BCC)) - .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB); + .addImm(Cond[0].getImm()) + .add(Cond[1]) + .addMBB(TBB); return 1; } @@ -677,12 +679,14 @@ (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) : (isPPC64 ? 
PPC::BDZ8 : PPC::BDZ))).addMBB(TBB); else if (Cond[0].getImm() == PPC::PRED_BIT_SET) - BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB); + BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB); else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET) - BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB); + BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB); else BuildMI(&MBB, DL, get(PPC::BCC)) - .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB); + .addImm(Cond[0].getImm()) + .add(Cond[1]) + .addMBB(TBB); BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB); return 2; } Index: lib/Target/PowerPC/PPCMIPeephole.cpp =================================================================== --- lib/Target/PowerPC/PPCMIPeephole.cpp +++ lib/Target/PowerPC/PPCMIPeephole.cpp @@ -147,9 +147,9 @@ << "Optimizing load-and-splat/splat " "to load-and-splat/copy: "); DEBUG(MI.dump()); - BuildMI(MBB, &MI, MI.getDebugLoc(), - TII->get(PPC::COPY), MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(1)); + BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY), + MI.getOperand(0).getReg()) + .add(MI.getOperand(1)); ToErase = &MI; Simplified = true; } @@ -169,9 +169,9 @@ << "Optimizing splat/swap or splat/splat " "to splat/copy: "); DEBUG(MI.dump()); - BuildMI(MBB, &MI, MI.getDebugLoc(), - TII->get(PPC::COPY), MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(1)); + BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY), + MI.getOperand(0).getReg()) + .add(MI.getOperand(1)); ToErase = &MI; Simplified = true; } @@ -194,9 +194,9 @@ else if (Immed == 2 && FeedImmed == 2 && FeedReg1 == FeedReg2) { DEBUG(dbgs() << "Optimizing swap/swap => copy: "); DEBUG(MI.dump()); - BuildMI(MBB, &MI, MI.getDebugLoc(), - TII->get(PPC::COPY), MI.getOperand(0).getReg()) - .addOperand(DefMI->getOperand(1)); + BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY), + MI.getOperand(0).getReg()) + .add(DefMI->getOperand(1)); ToErase = &MI; Simplified = true; } @@ -251,7 +251,7 
@@ DEBUG(MI.dump()); BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY), MI.getOperand(0).getReg()) - .addOperand(MI.getOperand(OpNo)); + .add(MI.getOperand(OpNo)); ToErase = &MI; Simplified = true; } Index: lib/Target/PowerPC/PPCVSXCopy.cpp =================================================================== --- lib/Target/PowerPC/PPCVSXCopy.cpp +++ lib/Target/PowerPC/PPCVSXCopy.cpp @@ -112,7 +112,7 @@ TII->get(TargetOpcode::SUBREG_TO_REG), NewVReg) .addImm(1) // add 1, not 0, because there is no implicit clearing // of the high bits. - .addOperand(SrcMO) + .add(SrcMO) .addImm(PPC::sub_64); // The source of the original copy is now the new virtual register. @@ -132,7 +132,7 @@ unsigned NewVReg = MRI.createVirtualRegister(DstRC); BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(TargetOpcode::COPY), NewVReg) - .addOperand(SrcMO); + .add(SrcMO); // Transform the original copy into a subregister extraction copy. SrcMO.setReg(NewVReg); Index: lib/Target/PowerPC/PPCVSXSwapRemoval.cpp =================================================================== --- lib/Target/PowerPC/PPCVSXSwapRemoval.cpp +++ lib/Target/PowerPC/PPCVSXSwapRemoval.cpp @@ -936,9 +936,9 @@ Changed = true; MachineInstr *MI = SwapVector[EntryIdx].VSEMI; MachineBasicBlock *MBB = MI->getParent(); - BuildMI(*MBB, MI, MI->getDebugLoc(), - TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) - .addOperand(MI->getOperand(1)); + BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY), + MI->getOperand(0).getReg()) + .add(MI->getOperand(1)); DEBUG(dbgs() << format("Replaced %d with copy: ", SwapVector[EntryIdx].VSEId)); Index: lib/Target/SystemZ/SystemZElimCompare.cpp =================================================================== --- lib/Target/SystemZ/SystemZElimCompare.cpp +++ lib/Target/SystemZ/SystemZElimCompare.cpp @@ -216,9 +216,7 @@ Branch->RemoveOperand(0); Branch->setDesc(TII->get(BRCT)); MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch); - 
MIB.addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) - .addOperand(Target); + MIB.add(MI.getOperand(0)).add(MI.getOperand(1)).add(Target); // Add a CC def to BRCT(G), since we may have to split them again if the // branch displacement overflows. BRCTH has a 32-bit displacement, so // this is not necessary there. @@ -261,10 +259,10 @@ Branch->RemoveOperand(0); Branch->setDesc(TII->get(LATOpcode)); MachineInstrBuilder(*Branch->getParent()->getParent(), Branch) - .addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) - .addOperand(MI.getOperand(2)) - .addOperand(MI.getOperand(3)); + .add(MI.getOperand(0)) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)) + .add(MI.getOperand(3)); MI.eraseFromParent(); return true; } @@ -502,15 +500,15 @@ Branch->setDesc(TII->get(FusedOpcode)); MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch); for (unsigned I = 0; I < SrcNOps; I++) - MIB.addOperand(Compare.getOperand(I)); - MIB.addOperand(CCMask); + MIB.add(Compare.getOperand(I)); + MIB.add(CCMask); if (Type == SystemZII::CompareAndBranch) { // Only conditional branches define CC, as they may be converted back // to a non-fused branch because of a long displacement. Conditional // returns don't have that problem. 
- MIB.addOperand(Target) - .addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead); + MIB.add(Target).addReg(SystemZ::CC, + RegState::ImplicitDefine | RegState::Dead); } if (Type == SystemZII::CompareAndSibcall) Index: lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- lib/Target/SystemZ/SystemZISelLowering.cpp +++ lib/Target/SystemZ/SystemZISelLowering.cpp @@ -5215,7 +5215,7 @@ unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) - .addOperand(Base) + .add(Base) .addImm(0) .addReg(0); return Reg; @@ -5304,8 +5304,11 @@ if (Invert) CCMask ^= CCValid; BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) - .addReg(SrcReg).addOperand(Base).addImm(Disp) - .addImm(CCValid).addImm(CCMask); + .addReg(SrcReg) + .add(Base) + .addImm(Disp) + .addImm(CCValid) + .addImm(CCMask); MI.eraseFromParent(); return MBB; } @@ -5332,7 +5335,10 @@ // # fallthrough to JoinMBB MBB = FalseMBB; BuildMI(MBB, DL, TII->get(StoreOpcode)) - .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); + .addReg(SrcReg) + .add(Base) + .addImm(Disp) + .addReg(IndexReg); MBB->addSuccessor(JoinMBB); MI.eraseFromParent(); @@ -5397,8 +5403,7 @@ // %OrigVal = L Disp(%Base) // # fall through to LoopMMB MBB = StartMBB; - BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) - .addOperand(Base).addImm(Disp).addReg(0); + BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); MBB->addSuccessor(LoopMBB); // LoopMBB: @@ -5419,8 +5424,7 @@ if (Invert) { // Perform the operation normally and then invert every bit of the field. unsigned Tmp = MRI.createVirtualRegister(RC); - BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) - .addReg(RotatedOldVal).addOperand(Src2); + BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2); if (BitSize <= 32) // XILF with the upper BitSize bits set. 
BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) @@ -5436,7 +5440,8 @@ } else if (BinOpcode) // A simply binary operation. BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) - .addReg(RotatedOldVal).addOperand(Src2); + .addReg(RotatedOldVal) + .add(Src2); else if (IsSubWord) // Use RISBG to rotate Src2 into position and use it to replace the // field in RotatedOldVal. @@ -5447,7 +5452,10 @@ BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) - .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); + .addReg(OldVal) + .addReg(NewVal) + .add(Base) + .addImm(Disp); BuildMI(MBB, DL, TII->get(SystemZ::BRC)) .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); @@ -5515,8 +5523,7 @@ // %OrigVal = L Disp(%Base) // # fall through to LoopMMB MBB = StartMBB; - BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) - .addOperand(Base).addImm(Disp).addReg(0); + BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); MBB->addSuccessor(LoopMBB); // LoopMBB: @@ -5563,7 +5570,10 @@ BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) - .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); + .addReg(OldVal) + .addReg(NewVal) + .add(Base) + .addImm(Disp); BuildMI(MBB, DL, TII->get(SystemZ::BRC)) .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); @@ -5624,7 +5634,9 @@ // # fall through to LoopMMB MBB = StartMBB; BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) - .addOperand(Base).addImm(Disp).addReg(0); + .add(Base) + .addImm(Disp) + .addReg(0); MBB->addSuccessor(LoopMBB); // LoopMBB: @@ -5678,7 +5690,10 @@ BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) 
- .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp); + .addReg(OldVal) + .addReg(StoreVal) + .add(Base) + .addImm(Disp); BuildMI(MBB, DL, TII->get(SystemZ::BRC)) .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); @@ -5851,7 +5866,7 @@ if (!isUInt<12>(DestDisp)) { unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) - .addOperand(DestBase) + .add(DestBase) .addImm(DestDisp) .addReg(0); DestBase = MachineOperand::CreateReg(Reg, false); @@ -5860,15 +5875,18 @@ if (!isUInt<12>(SrcDisp)) { unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) - .addOperand(SrcBase) + .add(SrcBase) .addImm(SrcDisp) .addReg(0); SrcBase = MachineOperand::CreateReg(Reg, false); SrcDisp = 0; } BuildMI(*MBB, MI, DL, TII->get(Opcode)) - .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength) - .addOperand(SrcBase).addImm(SrcDisp); + .add(DestBase) + .addImm(DestDisp) + .addImm(ThisLength) + .add(SrcBase) + .addImm(SrcDisp); DestDisp += ThisLength; SrcDisp += ThisLength; Length -= ThisLength; Index: lib/Target/SystemZ/SystemZInstrInfo.cpp =================================================================== --- lib/Target/SystemZ/SystemZInstrInfo.cpp +++ lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -780,10 +780,11 @@ MI.RemoveOperand(0); MI.setDesc(get(SystemZ::CallBRCL)); MachineInstrBuilder(*MI.getParent()->getParent(), MI) - .addImm(CCValid).addImm(CCMask) - .addOperand(FirstOp) - .addRegMask(RegMask) - .addReg(SystemZ::CC, RegState::Implicit); + .addImm(CCValid) + .addImm(CCMask) + .add(FirstOp) + .addRegMask(RegMask) + .addReg(SystemZ::CC, RegState::Implicit); return true; } if (Opcode == SystemZ::CallBR) { @@ -976,12 +977,12 @@ MachineInstrBuilder MIB( *MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(), /*NoImplicit=*/true)); - 
MIB.addOperand(Dest); + MIB.add(Dest); // Keep the kill state, but drop the tied flag. MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg()); // Keep the remaining operands as-is. for (unsigned I = 2; I < NumOps; ++I) - MIB.addOperand(MI.getOperand(I)); + MIB.add(MI.getOperand(I)); MBB->insert(MI, MIB); return finishConvertToThreeAddress(&MI, MIB, LV); } @@ -1009,7 +1010,7 @@ MachineOperand &Src = MI.getOperand(1); MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode)) - .addOperand(Dest) + .add(Dest) .addReg(0) .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg()) @@ -1091,7 +1092,7 @@ unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD; return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(StoreOpcode)) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addFrameIndex(FrameIndex) .addImm(0) .addReg(0); @@ -1132,7 +1133,7 @@ .addFrameIndex(FrameIndex) .addImm(0) .addImm(Size) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(MI.getOperand(2).getImm()) .addMemOperand(MMO); } @@ -1140,7 +1141,7 @@ if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) { return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(SystemZ::MVC)) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(MI.getOperand(2).getImm()) .addImm(Size) .addFrameIndex(FrameIndex) @@ -1164,7 +1165,7 @@ MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(MemOpcode)); for (unsigned I = 0; I < OpNum; ++I) - MIB.addOperand(MI.getOperand(I)); + MIB.add(MI.getOperand(I)); MIB.addFrameIndex(FrameIndex).addImm(Offset); if (MemDesc.TSFlags & SystemZII::HasIndex) MIB.addReg(0); Index: lib/Target/SystemZ/SystemZLongBranch.cpp =================================================================== --- lib/Target/SystemZ/SystemZLongBranch.cpp +++ lib/Target/SystemZ/SystemZLongBranch.cpp @@ -354,13 +354,13 @@ MachineBasicBlock *MBB = MI->getParent(); 
DebugLoc DL = MI->getDebugLoc(); BuildMI(*MBB, MI, DL, TII->get(AddOpcode)) - .addOperand(MI->getOperand(0)) - .addOperand(MI->getOperand(1)) - .addImm(-1); + .add(MI->getOperand(0)) + .add(MI->getOperand(1)) + .addImm(-1); MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL)) - .addImm(SystemZ::CCMASK_ICMP) - .addImm(SystemZ::CCMASK_CMP_NE) - .addOperand(MI->getOperand(2)); + .addImm(SystemZ::CCMASK_ICMP) + .addImm(SystemZ::CCMASK_CMP_NE) + .add(MI->getOperand(2)); // The implicit use of CC is a killing use. BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo()); MI->eraseFromParent(); @@ -373,12 +373,12 @@ MachineBasicBlock *MBB = MI->getParent(); DebugLoc DL = MI->getDebugLoc(); BuildMI(*MBB, MI, DL, TII->get(CompareOpcode)) - .addOperand(MI->getOperand(0)) - .addOperand(MI->getOperand(1)); + .add(MI->getOperand(0)) + .add(MI->getOperand(1)); MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL)) - .addImm(SystemZ::CCMASK_ICMP) - .addOperand(MI->getOperand(2)) - .addOperand(MI->getOperand(3)); + .addImm(SystemZ::CCMASK_ICMP) + .add(MI->getOperand(2)) + .add(MI->getOperand(3)); // The implicit use of CC is a killing use. 
BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo()); MI->eraseFromParent(); Index: lib/Target/SystemZ/SystemZShortenInst.cpp =================================================================== --- lib/Target/SystemZ/SystemZShortenInst.cpp +++ lib/Target/SystemZ/SystemZShortenInst.cpp @@ -167,10 +167,10 @@ MI.RemoveOperand(0); MI.setDesc(TII->get(Opcode)); MachineInstrBuilder(*MI.getParent()->getParent(), &MI) - .addOperand(Dest) - .addOperand(Mode) - .addOperand(Src) - .addOperand(Suppress); + .add(Dest) + .add(Mode) + .add(Src) + .add(Suppress); return true; } return false; Index: lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -183,11 +183,9 @@ assert(Cond.size() == 2 && "Expected a flag and a successor block"); if (Cond[0].getImm()) { - BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).addOperand(Cond[1]); + BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]); } else { - BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS)) - .addMBB(TBB) - .addOperand(Cond[1]); + BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS)).addMBB(TBB).add(Cond[1]); } if (!FBB) return 1; Index: lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp +++ lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp @@ -118,7 +118,7 @@ // delete the br_unless. 
assert(Inverted); BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::BR_IF)) - .addOperand(MI->getOperand(0)) + .add(MI->getOperand(0)) .addReg(Cond); MBB.erase(MI); } Index: lib/Target/X86/X86CallFrameOptimization.cpp =================================================================== --- lib/Target/X86/X86CallFrameOptimization.cpp +++ lib/Target/X86/X86CallFrameOptimization.cpp @@ -482,8 +482,7 @@ if (isInt<8>(Val)) PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8; } - Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)) - .addOperand(PushOp); + Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).add(PushOp); break; case X86::MOV32mr: case X86::MOV64mr: @@ -496,9 +495,9 @@ Reg = MRI->createVirtualRegister(&X86::GR64RegClass); BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg); BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg) - .addReg(UndefReg) - .addOperand(PushOp) - .addImm(X86::sub_32bit); + .addReg(UndefReg) + .add(PushOp) + .addImm(X86::sub_32bit); } // If PUSHrmm is not slow on this target, try to fold the source of the Index: lib/Target/X86/X86ExpandPseudo.cpp =================================================================== --- lib/Target/X86/X86ExpandPseudo.cpp +++ lib/Target/X86/X86ExpandPseudo.cpp @@ -151,7 +151,7 @@ : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op)); for (unsigned i = 0; i != 5; ++i) - MIB.addOperand(MBBI->getOperand(i)); + MIB.add(MBBI->getOperand(i)); } else if (Opcode == X86::TCRETURNri64) { BuildMI(MBB, MBBI, DL, TII->get(IsWin64 ? 
X86::TAILJMPr64_REX : X86::TAILJMPr64)) @@ -214,7 +214,7 @@ MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL)); } for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I) - MIB.addOperand(MBBI->getOperand(I)); + MIB.add(MBBI->getOperand(I)); MBB.erase(MBBI); return true; } Index: lib/Target/X86/X86FixupBWInsts.cpp =================================================================== --- lib/Target/X86/X86FixupBWInsts.cpp +++ lib/Target/X86/X86FixupBWInsts.cpp @@ -226,7 +226,7 @@ unsigned NumArgs = MI->getNumOperands(); for (unsigned i = 1; i < NumArgs; ++i) - MIB.addOperand(MI->getOperand(i)); + MIB.add(MI->getOperand(i)); MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); @@ -264,7 +264,7 @@ // Drop imp-defs/uses that would be redundant with the new def/use. for (auto &Op : MI->implicit_operands()) if (Op.getReg() != (Op.isDef() ? NewDestReg : NewSrcReg)) - MIB.addOperand(Op); + MIB.add(Op); return MIB; } Index: lib/Target/X86/X86FixupLEAs.cpp =================================================================== --- lib/Target/X86/X86FixupLEAs.cpp +++ lib/Target/X86/X86FixupLEAs.cpp @@ -120,8 +120,8 @@ BuildMI(*MF, MI.getDebugLoc(), TII->get(MI.getOpcode() == X86::MOV32rr ? X86::LEA32r : X86::LEA64r)) - .addOperand(Dest) - .addOperand(Src) + .add(Dest) + .add(Src) .addImm(1) .addReg(0) .addImm(0) @@ -287,8 +287,8 @@ MachineInstr *NewMI = BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode)) - .addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)); + .add(MI.getOperand(0)) + .add(MI.getOperand(1)); MFI->erase(I); I = static_cast<MachineBasicBlock::iterator>(NewMI); return true; @@ -377,9 +377,9 @@ const MachineOperand &Src1 = MI.getOperand(SrcR1 == DstR ? 1 : 3); const MachineOperand &Src2 = MI.getOperand(SrcR1 == DstR ?
3 : 1); NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addrr_opcode)) - .addOperand(Dst) - .addOperand(Src1) - .addOperand(Src2); + .add(Dst) + .add(Src1) + .add(Src2); MFI->insert(I, NewMI); DEBUG(NewMI->dump();); } @@ -387,8 +387,8 @@ if (MI.getOperand(4).getImm() != 0) { const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3); NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addri_opcode)) - .addOperand(Dst) - .addOperand(SrcR) + .add(Dst) + .add(SrcR) .addImm(MI.getOperand(4).getImm()); MFI->insert(I, NewMI); DEBUG(NewMI->dump();); Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -24048,7 +24048,7 @@ for (unsigned i = 1; i < NumArgs; ++i) { MachineOperand &Op = MI.getOperand(i); if (!(Op.isReg() && Op.isImplicit())) - MIB.addOperand(Op); + MIB.add(Op); } if (MI.hasOneMemOperand()) MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); @@ -24084,7 +24084,7 @@ for (unsigned i = 1; i < NumArgs; ++i) { MachineOperand &Op = MI.getOperand(i); if (!(Op.isReg() && Op.isImplicit())) - MIB.addOperand(Op); + MIB.add(Op); } if (MI.hasOneMemOperand()) MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); @@ -24144,7 +24144,7 @@ unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX; MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); for (int i = 0; i < X86::AddrNumOperands; ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); unsigned ValOps = X86::AddrNumOperands; BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) @@ -24282,12 +24282,12 @@ // Load the offset value into a register OffsetReg = MRI.createVirtualRegister(OffsetRegClass); BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, UseFPOffset ? 
4 : 0) - .addOperand(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, UseFPOffset ? 4 : 0) + .add(Segment) + .setMemRefs(MMOBegin, MMOEnd); // Check if there is enough room left to pull this argument. BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) @@ -24307,12 +24307,12 @@ // Read the reg_save_area address. unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, 16) - .addOperand(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, 16) + .add(Segment) + .setMemRefs(MMOBegin, MMOEnd); // Zero-extend the offset unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); @@ -24334,13 +24334,13 @@ // Store it back into the va_list. BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, UseFPOffset ? 4 : 0) - .addOperand(Segment) - .addReg(NextOffsetReg) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, UseFPOffset ? 4 : 0) + .add(Segment) + .addReg(NextOffsetReg) + .setMemRefs(MMOBegin, MMOEnd); // Jump to endMBB BuildMI(offsetMBB, DL, TII->get(X86::JMP_1)) @@ -24354,12 +24354,12 @@ // Load the overflow_area address into a register. unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, 8) - .addOperand(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, 8) + .add(Segment) + .setMemRefs(MMOBegin, MMOEnd); // If we need to align it, do so. Otherwise, just copy the address // to OverflowDestReg. @@ -24390,13 +24390,13 @@ // Store the new overflow address. 
BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, 8) - .addOperand(Segment) - .addReg(NextAddrReg) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, 8) + .add(Segment) + .addReg(NextAddrReg) + .setMemRefs(MMOBegin, MMOEnd); // If we branched, emit the PHI to the front of endMBB. if (offsetMBB) { @@ -24869,12 +24869,12 @@ // instruction using the same address operands. if (Operand.isReg()) Operand.setIsKill(false); - MIB.addOperand(Operand); + MIB.add(Operand); } MachineInstr *FOpMI = MIB; MIB = BuildMI(*BB, MI, DL, TII->get(MOp)); for (int i = 0; i < X86::AddrNumOperands; ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill); MI.eraseFromParent(); // The pseudo instruction is gone now. return BB; @@ -25254,7 +25254,7 @@ if (i == X86::AddrDisp) MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset); else - MIB.addOperand(MI.getOperand(MemOpndSlot + i)); + MIB.add(MI.getOperand(MemOpndSlot + i)); } if (!UseImmLabel) MIB.addReg(LabelReg); @@ -25337,7 +25337,7 @@ // Reload FP MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); for (unsigned i = 0; i < X86::AddrNumOperands; ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); MIB.setMemRefs(MMOBegin, MMOEnd); // Reload IP MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); @@ -25345,7 +25345,7 @@ if (i == X86::AddrDisp) MIB.addDisp(MI.getOperand(i), LabelOffset); else - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); } MIB.setMemRefs(MMOBegin, MMOEnd); // Reload SP @@ -25354,7 +25354,7 @@ if (i == X86::AddrDisp) MIB.addDisp(MI.getOperand(i), SPOffset); else - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); } MIB.setMemRefs(MMOBegin, MMOEnd); // Jump Index: lib/Target/X86/X86InstrBuilder.h =================================================================== --- 
lib/Target/X86/X86InstrBuilder.h +++ lib/Target/X86/X86InstrBuilder.h @@ -147,7 +147,7 @@ static inline const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, const MachineOperand& Offset) { - return MIB.addImm(1).addReg(0).addOperand(Offset).addReg(0); + return MIB.addImm(1).addReg(0).add(Offset).addReg(0); } /// addRegOffset - This function is used to add a memory reference of the form Index: lib/Target/X86/X86InstrInfo.cpp =================================================================== --- lib/Target/X86/X86InstrInfo.cpp +++ lib/Target/X86/X86InstrInfo.cpp @@ -3569,7 +3569,7 @@ const DebugLoc &DL = Orig.getDebugLoc(); BuildMI(MBB, I, DL, get(X86::MOV32ri)) - .addOperand(Orig.getOperand(0)) + .add(Orig.getOperand(0)) .addImm(Value); } else { MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); @@ -3654,10 +3654,10 @@ // Virtual register of the wrong class, we have to create a temporary 64-bit // vreg to feed into the LEA. NewSrc = MF.getRegInfo().createVirtualRegister(RC); - MachineInstr *Copy = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), - get(TargetOpcode::COPY)) - .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) - .addOperand(Src); + MachineInstr *Copy = + BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY)) + .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) + .add(Src); // Which is obviously going to be dead after we're done with it. 
isKill = true; @@ -3823,10 +3823,10 @@ return nullptr; NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)) - .addOperand(Dest) + .add(Dest) .addReg(0) .addImm(1ULL << ShAmt) - .addOperand(Src) + .add(Src) .addImm(0) .addReg(0); break; @@ -3848,14 +3848,14 @@ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) - .addOperand(Dest) + .add(Dest) .addReg(0) .addImm(1ULL << ShAmt) .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)) .addImm(0) .addReg(0); if (ImplicitOp.getReg() != 0) - MIB.addOperand(ImplicitOp); + MIB.add(ImplicitOp); NewMI = MIB; break; @@ -3869,10 +3869,10 @@ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr; NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)) - .addOperand(Dest) + .add(Dest) .addReg(0) .addImm(1ULL << ShAmt) - .addOperand(Src) + .add(Src) .addImm(0) .addReg(0); break; @@ -3891,11 +3891,11 @@ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) - .addOperand(Dest) + .add(Dest) .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)); if (ImplicitOp.getReg() != 0) - MIB.addOperand(ImplicitOp); + MIB.add(ImplicitOp); NewMI = addOffset(MIB, 1); break; @@ -3905,10 +3905,8 @@ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr; assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!"); - NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)) - .addOperand(Dest) - .addOperand(Src), - 1); + NewMI = addOffset( + BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), 1); break; case X86::DEC64r: case X86::DEC32r: { @@ -3924,11 +3922,11 @@ return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) - .addOperand(Dest) + .add(Dest) .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); if (ImplicitOp.getReg() != 0) - MIB.addOperand(ImplicitOp); + MIB.add(ImplicitOp); NewMI = addOffset(MIB, -1); @@ -3939,10 +3937,8 @@ return is64Bit ? 
convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr; assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!"); - NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)) - .addOperand(Dest) - .addOperand(Src), - -1); + NewMI = addOffset( + BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), -1); break; case X86::ADD64rr: case X86::ADD64rr_DB: @@ -3970,12 +3966,11 @@ SrcReg2, isKill2, isUndef2, ImplicitOp2, LV)) return nullptr; - MachineInstrBuilder MIB = - BuildMI(MF, MI.getDebugLoc(), get(Opc)).addOperand(Dest); + MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest); if (ImplicitOp.getReg() != 0) - MIB.addOperand(ImplicitOp); + MIB.add(ImplicitOp); if (ImplicitOp2.getReg() != 0) - MIB.addOperand(ImplicitOp2); + MIB.add(ImplicitOp2); NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2); @@ -3995,9 +3990,8 @@ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!"); unsigned Src2 = MI.getOperand(2).getReg(); bool isKill2 = MI.getOperand(2).isKill(); - NewMI = addRegReg( - BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).addOperand(Dest), - Src.getReg(), Src.isKill(), Src2, isKill2); + NewMI = addRegReg(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest), + Src.getReg(), Src.isKill(), Src2, isKill2); // Preserve undefness of the operands. 
bool isUndef = MI.getOperand(1).isUndef(); @@ -4014,10 +4008,9 @@ case X86::ADD64ri32_DB: case X86::ADD64ri8_DB: assert(MI.getNumOperands() >= 3 && "Unknown add instruction!"); - NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)) - .addOperand(Dest) - .addOperand(Src), - MI.getOperand(2)); + NewMI = addOffset( + BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src), + MI.getOperand(2)); break; case X86::ADD32ri: case X86::ADD32ri8: @@ -4034,11 +4027,11 @@ return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) - .addOperand(Dest) + .add(Dest) .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); if (ImplicitOp.getReg() != 0) - MIB.addOperand(ImplicitOp); + MIB.add(ImplicitOp); NewMI = addOffset(MIB, MI.getOperand(2)); break; @@ -4051,10 +4044,9 @@ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr; assert(MI.getNumOperands() >= 3 && "Unknown add instruction!"); - NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)) - .addOperand(Dest) - .addOperand(Src), - MI.getOperand(2)); + NewMI = addOffset( + BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), + MI.getOperand(2)); break; } @@ -6044,7 +6036,7 @@ DebugLoc DL; MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); for (unsigned i = 0, e = Addr.size(); i != e; ++i) - MIB.addOperand(Addr[i]); + MIB.add(Addr[i]); MIB.addReg(SrcReg, getKillRegState(isKill)); (*MIB).setMemRefs(MMOBegin, MMOEnd); NewMIs.push_back(MIB); @@ -6079,7 +6071,7 @@ DebugLoc DL; MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); for (unsigned i = 0, e = Addr.size(); i != e; ++i) - MIB.addOperand(Addr[i]); + MIB.add(Addr[i]); (*MIB).setMemRefs(MMOBegin, MMOEnd); NewMIs.push_back(MIB); } @@ -6939,7 +6931,7 @@ if (NumAddrOps < 4) { // FrameIndex only - add an immediate offset (whether its zero or not). 
for (unsigned i = 0; i != NumAddrOps; ++i) - MIB.addOperand(MOs[i]); + MIB.add(MOs[i]); addOffset(MIB, PtrOffset); } else { // General Memory Addressing - we need to add any offset to an existing @@ -6950,7 +6942,7 @@ if (i == 3 && PtrOffset != 0) { MIB.addDisp(MO, PtrOffset); } else { - MIB.addOperand(MO); + MIB.add(MO); } } } @@ -6972,11 +6964,11 @@ unsigned NumOps = MI.getDesc().getNumOperands() - 2; for (unsigned i = 0; i != NumOps; ++i) { MachineOperand &MO = MI.getOperand(i + 2); - MIB.addOperand(MO); + MIB.add(MO); } for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); - MIB.addOperand(MO); + MIB.add(MO); } MachineBasicBlock *MBB = InsertPt->getParent(); @@ -7001,7 +6993,7 @@ assert(MO.isReg() && "Expected to fold into reg operand!"); addOperands(MIB, MOs, PtrOffset); } else { - MIB.addOperand(MO); + MIB.add(MO); } } @@ -7879,11 +7871,11 @@ if (FoldedStore) MIB.addReg(Reg, RegState::Define); for (MachineOperand &BeforeOp : BeforeOps) - MIB.addOperand(BeforeOp); + MIB.add(BeforeOp); if (FoldedLoad) MIB.addReg(Reg); for (MachineOperand &AfterOp : AfterOps) - MIB.addOperand(AfterOp); + MIB.add(AfterOp); for (MachineOperand &ImpOp : ImpOps) { MIB.addReg(ImpOp.getReg(), getDefRegState(ImpOp.isDef()) |