diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3202,6 +3202,31 @@
   if (VT != MVT::i64)
     return SDValue();
 
+  // fold (i64 (shr (add (zext a, i64), (zext b, i64)), 32)) -> (uaddo a,
+  // b).overflow
+  if (ShiftAmt == 32 && LHS.getOpcode() == ISD::ADD) {
+    SDValue AddLHS = LHS->getOperand(0);
+    SDValue AddRHS = LHS->getOperand(1);
+
+    const auto Is32to64ZExt = [](SDValue V) -> bool {
+      return V->getOpcode() == ISD::ZERO_EXTEND &&
+             V->getOperand(0)->getValueType(0) == MVT::i32 &&
+             V->getValueType(0) == MVT::i64;
+    };
+
+    if (Is32to64ZExt(AddLHS) && Is32to64ZExt(AddRHS)) {
+      // Create an i32 uaddo
+      SDValue A = AddLHS->getOperand(0);
+      SDValue B = AddRHS->getOperand(0);
+      SDValue UADDO = DAG.getNode(ISD::UADDO, SL, {MVT::i32, MVT::i1}, {A, B});
+      // Replace the original add with (i64 (zext (uaddo ...)))
+      DAG.ReplaceAllUsesOfValueWith(
+          LHS, DAG.getNode(ISD::ZERO_EXTEND, SL, VT, {UADDO}));
+      // Replace this right-shift with (i64 (zext (uaddo.overflow ...)))
+      return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, {UADDO.getValue(1)});
+    }
+  }
+
   if (ShiftAmt < 32)
     return SDValue();
 
diff --git a/llvm/test/CodeGen/AMDGPU/add_shr_carry.ll b/llvm/test/CodeGen/AMDGPU/add_shr_carry.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/add_shr_carry.ll
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx1010 -verify-machineinstrs | FileCheck -check-prefix=GFX10 %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -verify-machineinstrs | FileCheck -check-prefix=GFX11 %s
+
+define i64 @basic(i32 %a, i32 %b, i64 %c) {
+; VI-LABEL: basic:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: basic:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: basic:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_add_co_u32 v0, s4, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: basic:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: v_add_co_u32 v0, s0, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %a.zext = zext i32 %a to i64
+  %b.zext = zext i32 %b to i64
+  %add.a.b = add i64 %a.zext, %b.zext
+  %shr = lshr i64 %add.a.b, 32
+  %add.c.shr = add i64 %c, %shr
+  ret i64 %add.c.shr
+}
+
+define <3 x i32> @add_i96(<3 x i32> %0, <3 x i32> %1) #0 {
+; VI-LABEL: add_i96:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, v4, v1
+; VI-NEXT: v_addc_u32_e64 v4, s[4:5], 0, 0, vcc
+; VI-NEXT: v_add_u32_e32 v0, vcc, v3, v0
+; VI-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; VI-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, v5, v2
+; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v3
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: add_i96:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v4, v1
+; GFX9-NEXT: v_addc_co_u32_e64 v4, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v3, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v1, v3
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
+; GFX9-NEXT: v_add3_u32 v2, v5, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: add_i96:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_add_co_u32 v0, s4, v3, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX10-NEXT: v_add_co_u32 v1, s4, v4, v1
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s4, 0, 0, s4
+; GFX10-NEXT: v_add_co_u32 v1, vcc_lo, v1, v3
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v4, vcc_lo
+; GFX10-NEXT: v_add3_u32 v2, v5, v2, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: add_i96:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: v_add_co_u32 v0, s0, v3, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
+; GFX11-NEXT: v_add_co_u32 v1, s0, v4, v1
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, 0, 0, s0
+; GFX11-NEXT: v_add_co_u32 v1, vcc_lo, v1, v3
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v4, vcc_lo
+; GFX11-NEXT: v_add3_u32 v2, v5, v2, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+  %3 = extractelement <3 x i32> %0, i64 0
+  %4 = zext i32 %3 to i64
+  %5 = extractelement <3 x i32> %1, i64 0
+  %6 = zext i32 %5 to i64
+  %7 = add nuw nsw i64 %6, %4
+  %8 = extractelement <3 x i32> %0, i64 1
+  %9 = zext i32 %8 to i64
+  %10 = extractelement <3 x i32> %1, i64 1
+  %11 = zext i32 %10 to i64
+  %12 = add nuw nsw i64 %11, %9
+  %13 = lshr i64 %7, 32
+  %14 = add nuw nsw i64 %12, %13
+  %15 = extractelement <3 x i32> %0, i64 2
+  %16 = extractelement <3 x i32> %1, i64 2
+  %17 = add i32 %16, %15
+  %18 = lshr i64 %14, 32
+  %19 = trunc i64 %18 to i32
+  %20 = add i32 %17, %19
+  %21 = trunc i64 %7 to i32
+  %22 = insertelement <3 x i32> undef, i32 %21, i32 0
+  %23 = trunc i64 %14 to i32
+  %24 = insertelement <3 x i32> %22, i32 %23, i32 1
+  %25 = insertelement <3 x i32> %24, i32 %20, i32 2
+  ret <3 x i32> %25
+}
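
Why the fold is sound: each zero-extended i32 operand is at most 2^32 - 1, so the i64 sum is at most 2^33 - 2. Bits 33..63 of the sum are therefore always zero, and bit 32 is set exactly when the 32-bit add wraps, i.e. it equals (uaddo a, b).overflow. Below is a minimal standalone C++ sanity check of that identity; it is illustrative only, not part of the patch, and the helper names are made up.

// Sanity check (plain host C++, independent of LLVM) that
// ((zext a) + (zext b)) >> 32 equals the carry-out of the 32-bit add.
#include <cassert>
#include <cstdint>

// High word of the widened add: the pattern before the fold.
static uint64_t shrOfWideAdd(uint32_t A, uint32_t B) {
  return ((uint64_t)A + (uint64_t)B) >> 32;
}

// (uaddo a, b).overflow, zero-extended: the pattern after the fold. The
// 32-bit add wraps exactly when the mathematical sum exceeds UINT32_MAX.
static uint64_t uaddoOverflow(uint32_t A, uint32_t B) {
  uint32_t Sum = A + B; // unsigned wraparound is well defined
  return Sum < A ? 1 : 0;
}

int main() {
  const uint32_t Vals[] = {0u, 1u, 0x7FFFFFFFu, 0x80000000u,
                           0xFFFFFFFEu, 0xFFFFFFFFu};
  for (uint32_t A : Vals)
    for (uint32_t B : Vals)
      assert(shrOfWideAdd(A, B) == uaddoOverflow(A, B));
  return 0;
}

Because the widened sum fits in 33 bits, the shifted value is already known to be 0 or 1, which is what allows the combine to rewrite the shift as a zero-extended overflow bit and, as the tests show, lets the backend fold the carry directly into the following add chain.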