diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -869,7 +869,8 @@
     return;
   MachineInstr &MISucc = *NextOp->getParent();
   // Can the successor be shrunk?
-  if (!TII->canShrink(MISucc, *MRI))
+  if (!TII->canShrink(MISucc, *MRI) &&
+      TII->getNamedOperand(MISucc, AMDGPU::OpName::src1)->isReg())
     return;
   int SuccOpc = AMDGPU::getVOPe32(MISucc.getOpcode());
   // Make sure the carry in/out are subsequently unused.
@@ -903,6 +904,43 @@
 
   MI.eraseFromParent();
 
+  // We are doing the following transformation here to allow conversion of
+  // V_ADDC_U32 and V_SUBB_U32 to their VOP2 form
+  //
+  // %24:vgpr_32, %26:sreg_64_xexec = V_ADD_CO_U32_e64
+  //     %6:vgpr_32, %12:vgpr_32, 0, implicit $exec
+  // %25:vgpr_32, dead %27:sreg_64_xexec = V_ADDC_U32_e64
+  //     0, 0, killed %26:sreg_64_xexec, 0, implicit $exec
+  //
+  // becomes,
+  // %24:vgpr_32, %26:sreg_64_xexec = V_ADD_CO_U32_e64
+  //     %6:vgpr_32, %12:vgpr_32, 0, implicit $exec
+  // %33:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+  // %25:vgpr_32, dead %27:sreg_64_xexec = V_ADDC_U32_e64
+  //     0, %33:vgpr_32, killed %26:sreg_64_xexec, 0, implicit
+  //     $exec
+
+  const MachineOperand *Src1 =
+      TII->getNamedOperand(MISucc, AMDGPU::OpName::src1);
+  if (!Src1->isReg()) {
+
+    // Materialize the src1 immediate in a VGPR: the VOP2 form needs a register
+    Register NewSrc1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+    BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32),
+            NewSrc1)
+        .addImm(Src1->getImm());
+
+    // Replace MISucc with V_{SUBB|ADDC}_U32_e32
+    BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc))
+        .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst))
+        .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0))
+        .addReg(NewSrc1)
+        .setMIFlags(MISucc.getFlags());
+
+    MISucc.eraseFromParent();
+    return;
+  }
+
   // Replace MISucc with V_{SUBB|ADDC}_U32_e32
   BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc))
       .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst))
diff --git a/llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll b/llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+define amdgpu_kernel void @sdwa_test() local_unnamed_addr #0 {
+; GFX9-LABEL: sdwa_test:
+; GFX9: ; %bb.0: ; %bb
+; GFX9-NEXT: v_add_u32_e32 v1, 10, v0
+; GFX9-NEXT: v_add_u32_e32 v0, 20, v0
+; GFX9-NEXT: v_add_co_u32_sdwa v0, vcc, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT: s_endpgm
+bb:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %v0 = add i32 %tid, 10
+  %v1 = add i32 %tid, 20
+  %v2 = zext i32 %v0 to i64
+  %v3 = zext i32 %v1 to i64
+  %v.t = and i64 %v3, 255
+  %v4 = add i64 %v2, %v.t
+  store i64 %v4, i64 addrspace(1) * undef
+  ret void
+}
+
+
+define amdgpu_kernel void @test_add_co_sdwa(i64 addrspace(1)* %arg, i32 addrspace(1)* %arg1) #0 {
+; GFX9-LABEL: test_add_co_sdwa:
+; GFX9: ; %bb.0: ; %bb
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v4, v2, s[2:3]
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v3, s[0:1]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: global_store_dwordx2 v3, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp
+  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
+  %tmp5 = and i32 %tmp4, 255
+  %tmp6 = zext i32 %tmp5 to i64
+  %tmp7 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
+  %tmp8 = load i64, i64 addrspace(1)* %tmp7, align 8
+  %tmp9 = add nsw i64 %tmp8, %tmp6
+  store i64 %tmp9, i64 addrspace(1)* %tmp7, align 8
+  ret void
+}
+
+
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll b/llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+define amdgpu_kernel void @sdwa_test_sub() local_unnamed_addr #0 {
+; GFX9-LABEL: sdwa_test_sub:
+; GFX9: ; %bb.0: ; %bb
+; GFX9-NEXT: v_add_u32_e32 v1, 10, v0
+; GFX9-NEXT: v_add_u32_e32 v0, 20, v0
+; GFX9-NEXT: v_sub_co_u32_sdwa v0, vcc, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT: s_endpgm
+bb:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %v0 = add i32 %tid, 10
+  %v1 = add i32 %tid, 20
+  %v2 = zext i32 %v0 to i64
+  %v3 = zext i32 %v1 to i64
+  %v.t = and i64 %v3, 255
+  %v4 = sub i64 %v2, %v.t
+  store i64 %v4, i64 addrspace(1) * undef
+  ret void
+}
+
+
+define amdgpu_kernel void @test_sub_co_sdwa(i64 addrspace(1)* %arg, i32 addrspace(1)* %arg1) #0 {
+; GFX9-LABEL: test_sub_co_sdwa:
+; GFX9: ; %bb.0: ; %bb
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v4, v2, s[2:3]
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v3, s[0:1]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: global_store_dwordx2 v3, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp
+  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
+  %tmp5 = and i32 %tmp4, 255
+  %tmp6 = zext i32 %tmp5 to i64
+  %tmp7 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
+  %tmp8 = load i64, i64 addrspace(1)* %tmp7, align 8
+  %tmp9 = sub nsw i64 %tmp8, %tmp6
+  store i64 %tmp9, i64 addrspace(1)* %tmp7, align 8
+  ret void
+}
+
+
+declare i32 @llvm.amdgcn.workitem.id.x()