diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -245,8 +245,7 @@
   if (MO.isReg()) {
     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
     Register Reg = MO.getReg();
-    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
-        .addReg(Reg, 0, ComposedSubIdx);
+    TII.buildCopy(*BB, MI, MI->getDebugLoc(), DstReg, Reg, 0, ComposedSubIdx);
 
     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                      MO.isKill(), MO.isDead(), MO.isUndef(),
@@ -429,8 +428,7 @@
   Register Src1Reg = I.getOperand(3).getReg();
 
   if (HasCarryIn) {
-    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
-        .addReg(I.getOperand(4).getReg());
+    TII.buildCopy(*BB, &I, DL, AMDGPU::SCC, I.getOperand(4).getReg());
   }
 
   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
@@ -439,8 +437,7 @@
   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
       .add(I.getOperand(2))
       .add(I.getOperand(3));
-  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
-      .addReg(AMDGPU::SCC);
+  TII.buildCopy(*BB, &I, DL, Dst1Reg, AMDGPU::SCC);
 
   if (!MRI->getRegClassOrNull(Dst1Reg))
     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
@@ -516,8 +513,7 @@
     SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I, *SrcRC,
                                       I.getOperand(1));
     const DebugLoc &DL = I.getDebugLoc();
-    BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
-        .addReg(SrcReg, 0, SubReg);
+    TII.buildCopy(*BB, &I, DL, DstReg, SrcReg, 0, SubReg);
 
     I.eraseFromParent();
     return true;
@@ -643,8 +639,7 @@
   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
   for (int I = 0, E = NumDst; I != E; ++I) {
     MachineOperand &Dst = MI.getOperand(I);
-    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
-        .addReg(SrcReg, 0, SubRegs[I]);
+    TII.buildCopy(*BB, &MI, DL, Dst.getReg(), SrcReg, 0, SubRegs[I]);
 
     // Make sure the subregister index is valid for the source register.
     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
@@ -934,8 +929,7 @@
   const DebugLoc &DL = MI.getDebugLoc();
   MachineBasicBlock *MBB = MI.getParent();
 
-  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-      .addReg(M0Val);
+  TII.buildCopy(*MBB, &MI, DL, AMDGPU::M0, M0Val);
   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
       .addImm(2)
       .addImm(MI.getOperand(4).getImm()) // $attr
@@ -1001,8 +995,7 @@
       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
 
-      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-          .addReg(LaneSelect);
+      TII.buildCopy(*MBB, *MIB, DL, AMDGPU::M0, LaneSelect);
       MIB.addReg(AMDGPU::M0);
     }
   }
@@ -1290,8 +1283,7 @@
     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
                              .add(I.getOperand(2))
                              .add(I.getOperand(3));
-    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
-        .addReg(AMDGPU::SCC);
+    TII.buildCopy(*BB, &I, DL, CCReg, AMDGPU::SCC);
     bool Ret =
         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
@@ -1393,12 +1385,12 @@
       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
     } else if (Value == -1) { // all ones
       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
-      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
+      TII.buildCopy(*BB, &I, DL, DstReg, SrcReg);
     } else
       return false;
   } else {
     Register SrcReg = I.getOperand(2).getReg();
-    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
+    TII.buildCopy(*BB, &I, DL, DstReg, SrcReg);
   }
 
   I.eraseFromParent();
@@ -1488,8 +1480,7 @@
     Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
     Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                                AMDGPU::SReg_64RegClass, DL);
-    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
-        .addReg(LiveIn);
+    TII.buildCopy(*MBB, &I, DL, DstReg, LiveIn);
     I.eraseFromParent();
     return true;
   }
@@ -1554,8 +1545,7 @@
   unsigned Offset = Offset0 | (Offset1 << 8);
 
   Register M0Val = MI.getOperand(2).getReg();
-  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-      .addReg(M0Val);
+  TII.buildCopy(*MBB, &MI, DL, AMDGPU::M0, M0Val);
 
   Register DstReg = MI.getOperand(0).getReg();
   Register ValReg = MI.getOperand(3).getReg();
@@ -1656,8 +1646,7 @@
         .addReg(BaseOffset)
        .addImm(16);
 
-    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-        .addReg(M0Base);
+    TII.buildCopy(*MBB, &MI, DL, AMDGPU::M0, M0Base);
   }
 
   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
@@ -1701,8 +1690,7 @@
   const DebugLoc &DL = MI.getDebugLoc();
   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
 
-  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-      .addReg(PtrBase);
+  TII.buildCopy(*MBB, &MI, DL, AMDGPU::M0, PtrBase);
   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
     return false;
@@ -1913,8 +1901,7 @@
       MIB.addDef(TmpReg);
 
       if (!MRI->use_empty(VDataOut)) {
-        BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
-            .addReg(TmpReg, RegState::Kill, SubReg);
+        TII.buildCopy(*MBB, &MI, DL, VDataOut, TmpReg, RegState::Kill, SubReg);
       }
     } else {
@@ -2084,8 +2071,7 @@
   if (!isVCC(CCReg, *MRI)) {
     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
                                          AMDGPU::S_CSELECT_B32;
-    MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
-            .addReg(CCReg);
+    MachineInstr *CopySCC = TII.buildCopy(*BB, &I, DL, AMDGPU::SCC, CCReg);
 
     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
    // bank, because it does not cover the register class that we used to represent
@@ -2184,10 +2170,8 @@
   Register LoReg = MRI->createVirtualRegister(DstRC);
   Register HiReg = MRI->createVirtualRegister(DstRC);
-  BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
-      .addReg(SrcReg, 0, AMDGPU::sub0);
-  BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
-      .addReg(SrcReg, 0, AMDGPU::sub1);
+  TII.buildCopy(*MBB, I, DL, LoReg, SrcReg, 0, AMDGPU::sub0);
+  TII.buildCopy(*MBB, I, DL, HiReg, SrcReg, 0, AMDGPU::sub1);
 
   if (IsVALU && STI.hasSDWA()) {
     // Write the low 16-bits of the high element into the high 16-bits of the
@@ -2539,10 +2523,8 @@
   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
 
-  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
-      .addReg(Src, 0, AMDGPU::sub0);
-  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
-      .addReg(Src, 0, AMDGPU::sub1);
+  TII.buildCopy(*BB, &MI, DL, LoReg, Src, 0, AMDGPU::sub0);
+  TII.buildCopy(*BB, &MI, DL, HiReg, Src, 0, AMDGPU::sub1);
 
   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
       .addImm(0x80000000);
@@ -2580,10 +2562,8 @@
       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
     return false;
 
-  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
-      .addReg(Src, 0, AMDGPU::sub0);
-  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
-      .addReg(Src, 0, AMDGPU::sub1);
+  TII.buildCopy(*BB, &MI, DL, LoReg, Src, 0, AMDGPU::sub0);
+  TII.buildCopy(*BB, &MI, DL, HiReg, Src, 0, AMDGPU::sub1);
 
   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
       .addImm(0x7fffffff);
@@ -2700,7 +2680,7 @@
   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
   const unsigned Opcode = MI.getOpcode();
 
-  if (Opcode == AMDGPU::COPY)
+  if (MI.isCopy())
     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
 
   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
@@ -2761,8 +2741,7 @@
     if (!MRI->getRegClassOrNull(CondReg))
       MRI->setRegClass(CondReg, ConstrainRC);
 
-    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
-        .addReg(CondReg);
+    TII.buildCopy(*BB, &I, DL, CondPhysReg, CondReg);
     BuildMI(*BB, &I, DL, TII.get(BrOpcode))
         .addMBB(I.getOperand(1).getMBB());
@@ -2846,10 +2825,8 @@
   Register LoReg = MRI->createVirtualRegister(&RegRC);
 
   // Extract the subregisters from the source pointer.
-  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
-      .addReg(SrcReg, 0, AMDGPU::sub0);
-  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
-      .addReg(SrcReg, 0, AMDGPU::sub1);
+  TII.buildCopy(*BB, &I, DL, LoReg, SrcReg, 0, AMDGPU::sub0);
+  TII.buildCopy(*BB, &I, DL, HiReg, SrcReg, 0, AMDGPU::sub1);
 
   Register MaskedLo, MaskedHi;
@@ -2861,8 +2838,7 @@
     Register MaskLo = MRI->createVirtualRegister(&RegRC);
     MaskedLo = MRI->createVirtualRegister(&RegRC);
 
-    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
-        .addReg(MaskReg, 0, AMDGPU::sub0);
+    TII.buildCopy(*BB, &I, DL, MaskLo, MaskReg, 0, AMDGPU::sub0);
     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
         .addReg(LoReg)
         .addReg(MaskLo);
@@ -2875,8 +2851,7 @@
     Register MaskHi = MRI->createVirtualRegister(&RegRC);
     MaskedHi = MRI->createVirtualRegister(&RegRC);
 
-    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
-        .addReg(MaskReg, 0, AMDGPU::sub1);
+    TII.buildCopy(*BB, &I, DL, MaskHi, MaskReg, 0, AMDGPU::sub1);
     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
         .addReg(HiReg)
         .addReg(MaskHi);
@@ -2959,8 +2934,7 @@
     if (DstTy.getSizeInBits() != 32 && !Is64)
       return false;
 
-    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-        .addReg(IdxReg);
+    TII.buildCopy(*BB, &MI, DL, AMDGPU::M0, IdxReg);
 
     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
@@ -2974,8 +2948,7 @@
     return false;
 
   if (!STI.useVGPRIndexMode()) {
-    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-        .addReg(IdxReg);
+    TII.buildCopy(*BB, &MI, DL, AMDGPU::M0, IdxReg);
     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
         .addReg(SrcReg, 0, SubReg)
         .addReg(SrcReg, RegState::Implicit);
@@ -3043,8 +3016,7 @@
   const DebugLoc &DL = MI.getDebugLoc();
 
   if (!IndexMode) {
-    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
-        .addReg(IdxReg);
+    TII.buildCopy(*BB, &MI, DL, AMDGPU::M0, IdxReg);
 
     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
@@ -3539,9 +3511,8 @@
     // we now have an SGPR register source. To avoid potentially violating the
     // constant bus restriction, we need to insert a copy to a VGPR.
     Register VGPRSrc = MRI->cloneVirtualRegister(Root.getReg());
-    BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
-            TII.get(AMDGPU::COPY), VGPRSrc)
-        .addReg(Src);
+    TII.buildCopy(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
+                  VGPRSrc, Src);
 
     Src = VGPRSrc;
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
@@ -94,7 +94,7 @@
   // Search for existing copy of Reg to vgpr.
   for (MachineInstr &Use : MRI.use_instructions(Reg)) {
     Register Def = Use.getOperand(0).getReg();
-    if (Use.getOpcode() == AMDGPU::COPY && isVgprRegBank(Def))
+    if (Use.isCopy() && isVgprRegBank(Def))
       return Def;
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3155,7 +3155,7 @@
   while (Start->getOpcode() != FrameSetupOpcode) {
     --Start;
     bool IsCopy = false;
-    if (Start->getOpcode() == AMDGPU::COPY) {
+    if (Start->isCopy()) {
       auto &Dst = Start->getOperand(0);
       if (Dst.isReg()) {
         Register Reg = Dst.getReg();
@@ -3195,7 +3195,7 @@
   while (End->getOpcode() != FrameDestroyOpcode) {
     ++End;
     bool IsCopy = false;
-    if (End->getOpcode() == AMDGPU::COPY) {
+    if (End->isCopy()) {
       auto &Src = End->getOperand(1);
       if (Src.isReg()) {
         Register Reg = Src.getReg();
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -338,7 +338,7 @@
                                     const SIInstrInfo *TII,
                                     unsigned &SMovOp,
                                     int64_t &Imm) {
-  if (Copy->getOpcode() != AMDGPU::COPY)
+  if (!Copy->isCopy())
     return false;
 
   if (!MoveImm->isMoveImmediate())
@@ -662,11 +662,9 @@
                     : MBB;
             MachineBasicBlock::iterator PointToInsertCopy =
                 MI.isPHI() ? BlockToInsertCopy->getFirstInstrTerminator() : I;
-            MachineInstr *NewCopy =
-                BuildMI(*BlockToInsertCopy, PointToInsertCopy,
-                        PointToInsertCopy->getDebugLoc(),
-                        TII->get(AMDGPU::COPY), NewDst)
-                    .addReg(MO.getReg());
+            MachineInstr *NewCopy = TII->buildCopy(
+                *BlockToInsertCopy, PointToInsertCopy,
+                PointToInsertCopy->getDebugLoc(), NewDst, MO.getReg());
             MO.setReg(NewDst);
             analyzeVGPRToSGPRCopy(NewCopy);
@@ -1084,9 +1082,8 @@
                     SCCCopy)
             .addImm(-1)
             .addImm(0);
-        I = BuildMI(*MI.getParent(), std::next(I), I->getDebugLoc(),
-                    TII->get(AMDGPU::COPY), DstReg)
-                .addReg(SCCCopy);
+        I = TII->buildCopy(*MI.getParent(), std::next(I), I->getDebugLoc(),
+                           DstReg, SCCCopy);
         MI.eraseFromParent();
         continue;
       }
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -260,9 +260,8 @@
     MachineInstr *Inst32 = TII->buildShrunkInst(*MI, Op32);
 
     if (HaveNonDbgCarryUse) {
-      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::COPY),
-              Dst1.getReg())
-          .addReg(AMDGPU::VCC, RegState::Kill);
+      TII->buildCopy(*MBB, MI, MI->getDebugLoc(), Dst1.getReg(), AMDGPU::VCC,
+                     RegState::Kill);
     }
 
     // Keep the old instruction around to avoid breaking iterators, but
@@ -1667,9 +1666,8 @@
   PHI.getOperand(0).setReg(NewReg);
 
   MachineBasicBlock *MBB = PHI.getParent();
-  BuildMI(*MBB, MBB->getFirstNonPHI(), Copy->getDebugLoc(),
-          TII->get(AMDGPU::COPY), PhiOut)
-      .addReg(NewReg, RegState::Kill);
+  TII->buildCopy(*MBB, MBB->getFirstNonPHI(), Copy->getDebugLoc(), PhiOut,
+                 NewReg, RegState::Kill);
   Copy->eraseFromParent(); // We know this copy had a single use.
 
   LLVM_DEBUG(dbgs() << "Folded " << PHI);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -2370,14 +2370,11 @@
       Register NewVR = MRI->createVirtualRegister(RC);
       // Create copy from CSR to a virtual register.
       Entry->addLiveIn(*I);
-      BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
-          .addReg(*I);
+      TII->buildCopy(*Entry, MBBI, DebugLoc(), NewVR, *I);
 
       // Insert the copy-back instructions right before the terminator.
       for (auto *Exit : Exits)
-        BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
-                TII->get(TargetOpcode::COPY), *I)
-            .addReg(NewVR);
+        TII->buildCopy(*Exit, Exit->getFirstTerminator(), DebugLoc(), *I,
+                       NewVR);
     }
   }
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3053,7 +3053,7 @@
     return false;
 
   unsigned Opc = UseMI.getOpcode();
-  if (Opc == AMDGPU::COPY) {
+  if (UseMI.isCopy()) {
     Register DstReg = UseMI.getOperand(0).getReg();
     bool Is16Bit = getOpSize(UseMI, 0) == 2;
     bool isVGPRCopy = RI.isVGPR(*MRI, DstReg);
@@ -5003,8 +5003,7 @@
   Register SubReg = MRI.createVirtualRegister(SubRC);
 
   if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
-    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
-        .addReg(SuperReg.getReg(), 0, SubIdx);
+    buildCopy(*MBB, MI, DL, SubReg, SuperReg.getReg(), 0, SubIdx);
     return SubReg;
   }
@@ -5014,11 +5013,10 @@
   // eliminate this extra copy.
   Register NewSuperReg = MRI.createVirtualRegister(SuperRC);
 
-  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
-      .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
+  buildCopy(*MBB, MI, DL, NewSuperReg, SuperReg.getReg(), 0,
+            SuperReg.getSubReg());
 
-  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
-      .addReg(NewSuperReg, 0, SubIdx);
+  buildCopy(*MBB, MI, DL, SubReg, NewSuperReg, 0, SubIdx);
 
   return SubReg;
 }
@@ -5392,9 +5390,8 @@
       if (RI.hasAGPRs(VRC)) {
         VRC = RI.getEquivalentVGPRClass(VRC);
         Register NewSrcReg = MRI.createVirtualRegister(VRC);
-        BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
-                get(TargetOpcode::COPY), NewSrcReg)
-            .addReg(SrcReg);
+        buildCopy(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), NewSrcReg,
+                  SrcReg);
         SrcReg = NewSrcReg;
       }
@@ -6360,8 +6357,7 @@
     Register CarryInReg = Inst.getOperand(4).getReg();
     if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
       Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
-      BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg)
-          .addReg(CarryInReg);
+      buildCopy(*MBB, &Inst, Inst.getDebugLoc(), NewCarryReg, CarryInReg);
     }
 
     Register CarryOutReg = Inst.getOperand(1).getReg();
@@ -6645,8 +6641,7 @@
     if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) !=
         -1) {
       if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
-        BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC)
-            .addReg(CandI.getOperand(1).getReg());
+        buildCopy(MBB, MII, DL, CopySCC, CandI.getOperand(1).getReg());
         CopyFound = true;
       }
       break;
@@ -7812,7 +7807,7 @@
 }
 
 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
-  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
+  return !MI.isTerminator() && !MI.isCopy() &&
          MI.modifiesRegister(AMDGPU::EXEC, &RI);
 }
@@ -8279,7 +8274,7 @@
   if (Cur != MBB.end())
     do {
       if (!Cur->isPHI() && Cur->readsRegister(Dst))
-        return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
+        return buildCopy(MBB, Cur, DL, Dst, Src);
       ++Cur;
     } while (Cur != MBB.end() && Cur != LastPHIIt);
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -232,10 +232,9 @@
   // will interfere with trying to form s_and_saveexec_b64 later.
   Register CopyReg = SimpleIf ? SaveExecReg
                               : MRI->createVirtualRegister(BoolRC);
-  MachineInstr *CopyExec =
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
-          .addReg(Exec)
-          .addReg(Exec, RegState::ImplicitDefine);
+  MachineInstrBuilder CopyExec = MachineInstrBuilder(
+      *MBB.getParent(), TII->buildCopy(MBB, I, DL, CopyReg, Exec));
+  CopyExec.addReg(Exec, RegState::ImplicitDefine);
   LoweredIf.insert(CopyReg);
 
   Register Tmp = MRI->createVirtualRegister(BoolRC);
diff --git a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
--- a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
@@ -492,7 +492,7 @@
   for (MachineBasicBlock &MBB : *MF) {
     for (MachineInstr &MI : MBB) {
-      if (MI.getOpcode() != AMDGPU::COPY)
+      if (!MI.isCopy())
         continue;
 
       Register DstReg = MI.getOperand(0).getReg();
@@ -571,7 +571,7 @@
       MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
       MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);
 
-      if (IncomingDef->getOpcode() == AMDGPU::COPY) {
+      if (IncomingDef->isCopy()) {
         IncomingReg = IncomingDef->getOperand(1).getReg();
         assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
         assert(!IncomingDef->getOperand(1).getSubReg());
@@ -674,8 +674,7 @@
     LF.initialize(MBB);
 
     for (MachineInstr &MI : MBB) {
-      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
-          MI.getOpcode() != AMDGPU::COPY)
+      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF && !MI.isCopy())
         continue;
 
       Register DstReg = MI.getOperand(0).getReg();
@@ -744,7 +743,7 @@
     if (MI->getOpcode() == AMDGPU::IMPLICIT_DEF)
       return true;
 
-    if (MI->getOpcode() != AMDGPU::COPY)
+    if (!MI->isCopy())
       break;
 
     Reg = MI->getOperand(1).getReg();
@@ -827,9 +826,9 @@
   if (PrevConstant && CurConstant) {
     if (PrevVal == CurVal) {
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
+      TII->buildCopy(MBB, I, DL, DstReg, CurReg);
     } else if (CurVal) {
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
+      TII->buildCopy(MBB, I, DL, DstReg, ExecReg);
     } else {
       BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
           .addReg(ExecReg)
@@ -863,11 +862,9 @@
   }
 
   if (PrevConstant && !PrevVal) {
-    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
-        .addReg(CurMaskedReg);
+    TII->buildCopy(MBB, I, DL, DstReg, CurMaskedReg);
   } else if (CurConstant && !CurVal) {
-    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
-        .addReg(PrevMaskedReg);
+    TII->buildCopy(MBB, I, DL, DstReg, PrevMaskedReg);
   } else if (PrevConstant && PrevVal) {
     BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
         .addReg(CurMaskedReg)
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -1821,7 +1821,7 @@
        LastLowLatencyUser = i;
     // Moves COPY instructions on which depends
     // the low latency instructions too.
-    } else if (SU->getInstr()->getOpcode() == AMDGPU::COPY) {
+    } else if (SU->getInstr()->isCopy()) {
       bool CopyForLowLat = false;
       for (SDep& SuccDep : SU->Succs) {
         SUnit *Succ = SuccDep.getSUnit();
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1213,8 +1213,9 @@
   // It could result in AGPR spills restored to VGPRs or the other way around,
   // making the src and dst with identical regclasses at this point. It just
   // needs a copy in such cases.
-  auto CopyMIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), Dst)
-                     .addReg(Src, getKillRegState(IsKill));
+  MachineInstrBuilder CopyMIB =
+      MachineInstrBuilder(*MBB.getParent(), TII->buildCopy(MBB, MI, DL, Dst));
+  CopyMIB.addReg(Src, getKillRegState(IsKill));
   CopyMIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
   return CopyMIB;
 }
@@ -2446,8 +2447,8 @@
             .addReg(ScaledReg, RegState::Kill)
            .addImm(Offset);
         if (!IsSALU)
-          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
-              .addReg(ScaledReg, RegState::Kill);
+          TII->buildCopy(*MBB, MI, DL, ResultReg, ScaledReg,
+                         RegState::Kill);
         else
           ResultReg = ScaledReg;
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -628,8 +628,7 @@
 // This is really just a generic peephole that is not a canonical shrinking,
 // although requirements match the pass placement and it reduces code size too.
 MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
-  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
-         MovT.getOpcode() == AMDGPU::COPY);
+  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 || MovT.isCopy());
 
   Register T = MovT.getOperand(0).getReg();
   unsigned Tsub = MovT.getOperand(0).getSubReg();
@@ -655,10 +654,8 @@
     MachineInstr *MovY = &*Iter;
     KilledT = MovY->killsRegister(T, TRI);
 
-    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
-         MovY->getOpcode() != AMDGPU::COPY) ||
-        !MovY->getOperand(1).isReg() ||
-        MovY->getOperand(1).getReg() != T ||
+    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 && !MovY->isCopy()) ||
+        !MovY->getOperand(1).isReg() || MovY->getOperand(1).getReg() != T ||
         MovY->getOperand(1).getSubReg() != Tsub)
       continue;
@@ -684,9 +681,7 @@
       }
       continue;
     }
-    if (MovX ||
-        (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
-         I->getOpcode() != AMDGPU::COPY) ||
+    if (MovX || (I->getOpcode() != AMDGPU::V_MOV_B32_e32 && !I->isCopy()) ||
         I->getOperand(0).getReg() != X ||
         I->getOperand(0).getSubReg() != Xsub) {
       MovX = nullptr;
@@ -802,8 +797,8 @@
         }
       }
 
-      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
-                            MI.getOpcode() == AMDGPU::COPY)) {
+      if (ST->hasSwap() &&
+          (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 || MI.isCopy())) {
         if (auto *NextMI = matchSwap(MI)) {
           Next = NextMI->getIterator();
           continue;
diff --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
--- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -745,11 +745,9 @@
   Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
 
   MachineInstr *Save =
-      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
-          .addReg(AMDGPU::SCC);
+      TII->buildCopy(MBB, Before, DebugLoc(), SaveReg, AMDGPU::SCC);
   MachineInstr *Restore =
-      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
-          .addReg(SaveReg);
+      TII->buildCopy(MBB, Before, DebugLoc(), AMDGPU::SCC, SaveReg);
 
   LIS->InsertMachineInstrInMaps(*Save);
   LIS->InsertMachineInstrInMaps(*Restore);
@@ -1227,8 +1225,7 @@
   MachineInstr *MI;
 
   if (SavedWQM) {
-    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec)
-             .addReg(SavedWQM);
+    MI = TII->buildCopy(MBB, Before, DebugLoc(), Exec, SavedWQM);
   } else {
     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(WQMOpc), Exec).addReg(Exec);
   }
@@ -1313,7 +1310,7 @@
   auto II = MBB.getFirstNonPHI(), IE = MBB.end();
   if (IsEntry) {
     // Skip the instruction that saves LiveMask
-    if (II != IE && II->getOpcode() == AMDGPU::COPY)
+    if (II != IE && II->isCopy())
       ++II;
   }
@@ -1482,8 +1479,7 @@
   Register Dest = MI->getOperand(0).getReg();
 
   MachineInstr *Copy =
-      BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
-          .addReg(LiveMaskReg);
+      TII->buildCopy(*MI->getParent(), MI, DL, Dest, LiveMaskReg);
 
   LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
   MI->eraseFromParent();
@@ -1622,8 +1618,7 @@
   if (NeedsLiveMask || (GlobalFlags & StateWQM)) {
     LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC());
     MachineInstr *MI =
-        BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg)
-            .addReg(Exec);
+        TII->buildCopy(Entry, EntryMI, DebugLoc(), LiveMaskReg, Exec);
     LIS->InsertMachineInstrInMaps(*MI);
   }
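
Note: this patch only updates call sites; the definition of the new SIInstrInfo::buildCopy helper is not part of the excerpt. For reference, here is a minimal sketch of what the helper presumably looks like, inferred purely from the call patterns above. The parameter names and the defaulted trailing arguments (an invalid Register as the "no source" sentinel, flags and sub-register defaulting to 0) are assumptions, not taken from the patch itself.

// Sketch only: signature inferred from the call sites in this diff.
// Emits a TargetOpcode::COPY and returns the new MachineInstr so callers
// can store it directly (e.g. MachineInstr *CopySCC = ...), assign it to a
// MachineBasicBlock::iterator, or wrap it in a MachineInstrBuilder when
// extra operands are needed.
MachineInstr *SIInstrInfo::buildCopy(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, Register DestReg,
                                     Register SrcReg /*= Register()*/,
                                     unsigned Flags /*= 0*/,
                                     unsigned SubReg /*= 0*/) const {
  MachineInstrBuilder MIB =
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), DestReg);
  // A few call sites (SIRegisterInfo.cpp, SILowerControlFlow.cpp) pass no
  // source at all and append their source operands afterwards through a
  // MachineInstrBuilder wrapped around the returned instruction.
  if (SrcReg.isValid())
    MIB.addReg(SrcReg, Flags, SubReg);
  return MIB;
}

Returning MachineInstr * rather than MachineInstrBuilder is consistent with how the diff rewrites the SILowerControlFlow.cpp and SIRegisterInfo.cpp call sites, both of which explicitly reconstruct a MachineInstrBuilder from the returned instruction before adding further operands.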