diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -869,7 +869,10 @@
     return;
   MachineInstr &MISucc = *NextOp->getParent();
   // Can the successor be shrunk?
-  if (!TII->canShrink(MISucc, *MRI))
+  // Proceed if it can be shrunk as-is, or if the only obstacle is an
+  // immediate src1, which is materialized with a V_MOV_B32 below.
+  if (!TII->canShrink(MISucc, *MRI) &&
+      TII->getNamedOperand(MISucc, AMDGPU::OpName::src1)->isReg())
     return;
   int SuccOpc = AMDGPU::getVOPe32(MISucc.getOpcode());
   // Make sure the carry in/out are subsequently unused.
@@ -903,6 +904,43 @@
   MI.eraseFromParent();
 
+  // We are doing the following transformation here to allow conversion of
+  // V_ADDC_U32 and V_SUBB_U32 to their VOP2 form
+  //
+  // %24:vgpr_32, %26:sreg_64_xexec = V_ADD_CO_U32_e64
+  //     %6:vgpr_32, %12:vgpr_32, 0, implicit $exec
+  // %25:vgpr_32, dead %27:sreg_64_xexec = V_ADDC_U32_e64
+  //     0, 0, killed %26:sreg_64_xexec, 0, implicit $exec
+  //
+  // becomes,
+  // %24:vgpr_32, %26:sreg_64_xexec = V_ADD_CO_U32_e64
+  //     %6:vgpr_32, %12:vgpr_32, 0, implicit $exec
+  // %33:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+  // %25:vgpr_32, dead %27:sreg_64_xexec = V_ADDC_U32_e64
+  //     0, %33:vgpr_32, killed %26:sreg_64_xexec, 0, implicit
+  //     $exec
+
+  const MachineOperand *Src1 =
+      TII->getNamedOperand(MISucc, AMDGPU::OpName::src1);
+  if (!Src1->isReg()) {
+
+    // Add a move instruction to replace the src1 immediate 0
+    Register src1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+    BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32),
+            src1)
+        .addImm(Src1->getImm());
+
+    // Replace MISucc with V_{SUBB|ADDC}_U32_e32
+    BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc))
+        .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst))
+        .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0))
+        .addReg(src1)
+        .setMIFlags(MISucc.getFlags());
+
+    MISucc.eraseFromParent();
+    return;
+  }
+
   // Replace MISucc with V_{SUBB|ADDC}_U32_e32
   BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc))
       .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst))
       .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0))
diff --git a/llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll b/llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll
--- a/llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll
@@ -5,9 +5,9 @@
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    v_add_u32_e32 v1, 10, v0
 ; GFX9-NEXT:    v_add_u32_e32 v0, 20, v0
-; GFX9-NEXT:    v_and_b32_e32 v0, 0xff, v0
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v1, v0
-; GFX9-NEXT:    v_addc_co_u32_e64 v1, s[0:1], 0, 0, vcc
+; GFX9-NEXT:    v_add_co_u32_sdwa v0, vcc, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX9-NEXT:    s_endpgm
 bb:
@@ -27,16 +27,13 @@
 ; GFX9-LABEL: test_add_co_sdwa:
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v3, 3, v0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dword v2, v1, s[2:3]
-; GFX9-NEXT:    s_nop 0
+; GFX9-NEXT:    global_load_dword v4, v2, s[2:3]
 ; GFX9-NEXT:    global_load_dwordx2 v[0:1], v3, s[0:1]
-; GFX9-NEXT:    s_waitcnt vmcnt(1)
-; GFX9-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_add_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v3, v[0:1], s[0:1]
 ; GFX9-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll b/llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll
--- a/llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll
@@ -5,9 +5,9 @@
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    v_add_u32_e32 v1, 10, v0
 ; GFX9-NEXT:    v_add_u32_e32 v0, 20, v0
-; GFX9-NEXT:    v_and_b32_e32 v0, 0xff, v0
-; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v1, v0
-; GFX9-NEXT:    v_subb_co_u32_e64 v1, s[0:1], 0, 0, vcc
+; GFX9-NEXT:    v_sub_co_u32_sdwa v0, vcc, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX9-NEXT:    s_endpgm
 bb:
@@ -27,16 +27,13 @@
 ; GFX9-LABEL: test_sub_co_sdwa:
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v3, 3, v0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dword v2, v1, s[2:3]
-; GFX9-NEXT:    s_nop 0
+; GFX9-NEXT:    global_load_dword v4, v2, s[2:3]
 ; GFX9-NEXT:    global_load_dwordx2 v[0:1], v3, s[0:1]
-; GFX9-NEXT:    s_waitcnt vmcnt(1)
-; GFX9-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_sub_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v3, v[0:1], s[0:1]
 ; GFX9-NEXT:    s_endpgm