Index: lib/Target/AMDGPU/SIFoldOperands.cpp
===================================================================
--- lib/Target/AMDGPU/SIFoldOperands.cpp
+++ lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -362,8 +362,6 @@
 
   bool FoldingImm = OpToFold.isImm();
 
-  // In order to fold immediates into copies, we need to change the
-  // copy to a MOV.
   if (FoldingImm && UseMI->isCopy()) {
     unsigned DestReg = UseMI->getOperand(0).getReg();
     const TargetRegisterClass *DestRC
@@ -371,6 +369,31 @@
       MRI->getRegClass(DestReg) :
       TRI->getPhysRegClass(DestReg);
 
+    unsigned SrcReg = UseMI->getOperand(1).getReg();
+    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
+        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+      const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
+      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
+        MachineRegisterInfo::use_iterator NextUse;
+        SmallVector<FoldCandidate, 4> CopyUses;
+        for (MachineRegisterInfo::use_iterator
+               Use = MRI->use_begin(DestReg), E = MRI->use_end();
+             Use != E; Use = NextUse) {
+          NextUse = std::next(Use);
+          FoldCandidate FC = FoldCandidate(Use->getParent(),
+            Use.getOperandNo(), &UseMI->getOperand(1));
+          CopyUses.push_back(FC);
+        }
+        for (auto & F : CopyUses) {
+          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
+            FoldList, CopiesToReplace);
+        }
+      }
+    }
+
+    // In order to fold immediates into copies, we need to change the
+    // copy to a MOV.
+
     unsigned MovOp = TII->getMovOpcode(DestRC);
     if (MovOp == AMDGPU::COPY)
       return;
Index: test/CodeGen/AMDGPU/fold-imm-copy.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -0,0 +1,33 @@
+# RUN: llc -march=amdgcn -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
+
+# GCN-LABEL: name: fold-imm-copy
+# GCN: [[SREG:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 65535
+# GCN: V_AND_B32_e32 [[SREG]]
+
+name: fold-imm-copy
+registers:
+  - { id: 0, class: vgpr_32, preferred-register: '' }
+  - { id: 1, class: sgpr_64, preferred-register: '' }
+  - { id: 2, class: sreg_128, preferred-register: '' }
+  - { id: 3, class: sreg_32_xm0, preferred-register: '' }
+  - { id: 4, class: vgpr_32, preferred-register: '' }
+  - { id: 5, class: vgpr_32, preferred-register: '' }
+  - { id: 6, class: vreg_64, preferred-register: '' }
+  - { id: 8, class: sreg_32_xm0, preferred-register: '' }
+  - { id: 9, class: vgpr_32, preferred-register: '' }
+  - { id: 10, class: vgpr_32, preferred-register: '' }
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0_sgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %1:sgpr_64 = COPY $sgpr0_sgpr1
+    %2:sreg_128 = S_LOAD_DWORDX4_IMM %1, 9, 0
+    %3:sreg_32_xm0 = S_MOV_B32 2
+    %4:vgpr_32 = V_LSHLREV_B32_e64 killed %3, %0, implicit $exec
+    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %6:vreg_64 = REG_SEQUENCE killed %4, %subreg.sub0, killed %5, %subreg.sub1
+    %7:vgpr_32 = BUFFER_LOAD_DWORD_ADDR64 %6, %2, 0, 4, 0, 0, 0, implicit $exec
+    %8:sreg_32_xm0 = S_MOV_B32 65535
+    %9:vgpr_32 = COPY %8
+    %10:vgpr_32 = V_AND_B32_e32 %7, %9, implicit $exec
+...