diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3202,6 +3202,50 @@
   if (VT != MVT::i64)
     return SDValue();
 
+  // fold (i64 (shr (add a, b), 32)) -> (uaddo a, b).overflow
+  // iff a/b have >= 32 leading zeroes
+  // (usually coming from an i32->i64 zext)
+  if (ShiftAmt == 32 && LHS.getOpcode() == ISD::ADD) {
+    SDValue AddLHS = LHS->getOperand(0);
+    SDValue AddRHS = LHS->getOperand(1);
+
+    KnownBits AddLHSKnownBits = DAG.computeKnownBits(AddLHS);
+    KnownBits AddRHSKnownBits = DAG.computeKnownBits(AddRHS);
+    if (AddLHSKnownBits.countMinLeadingZeros() >= 32 &&
+        AddRHSKnownBits.countMinLeadingZeros() >= 32) {
+
+      // All users of the add must either be this shr or truncs to i32.
+      // If there are other users, don't do the transform.
+      SmallVector<SDValue> TruncsToReplace;
+      bool CanCombine = true;
+      for (SDNode *User : LHS->uses()) {
+        if (User == N)
+          continue;
+        if (User->getOpcode() != ISD::TRUNCATE ||
+            User->getValueType(0) != MVT::i32) {
+          CanCombine = false;
+          break;
+        }
+
+        TruncsToReplace.push_back(SDValue(User, 0));
+      }
+
+      if (CanCombine) {
+        // (i32 (uaddo a, b))
+        SDValue A = DAG.getNode(ISD::TRUNCATE, SL, {MVT::i32}, {AddLHS});
+        SDValue B = DAG.getNode(ISD::TRUNCATE, SL, {MVT::i32}, {AddRHS});
+        SDValue UADDO =
+            DAG.getNode(ISD::UADDO, SL, {MVT::i32, MVT::i1}, {A, B});
+
+        for (SDValue V : TruncsToReplace)
+          DAG.ReplaceAllUsesOfValueWith(V, UADDO);
+
+        // Replace this shift with (i64 (zext uaddo.overflow))
+        return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, {UADDO.getValue(1)});
+      }
+    }
+  }
+
   if (ShiftAmt < 32)
     return SDValue();
diff --git a/llvm/test/CodeGen/AMDGPU/add_shr_carry.ll b/llvm/test/CodeGen/AMDGPU/add_shr_carry.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/add_shr_carry.ll
@@ -0,0 +1,221 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx1010 -verify-machineinstrs | FileCheck -check-prefix=GFX10 %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -verify-machineinstrs | FileCheck -check-prefix=GFX11 %s
+
+define i64 @basic_zext(i32 %a, i32 %b, i64 %c) {
+; VI-LABEL: basic_zext:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: basic_zext:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v2, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v3, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: basic_zext:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v0, s4, v0, v1
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v2, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: basic_zext:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    v_add_co_u32 v0, s0, v0, v1
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v2, v0
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a.zext = zext i32 %a to i64
+  %b.zext = zext i32 %b to i64
+  %add.a.b = add i64 %a.zext, %b.zext
+  %shr = lshr i64 %add.a.b, 32
+  %add.c.shr = add i64 %c, %shr
+  ret i64 %add.c.shr
+}
+
+define i64 @basic_cst_32leadingzeroes(i32 %b, i64 %c) {
+; VI-LABEL: basic_cst_32leadingzeroes:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, -1, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: basic_cst_32leadingzeroes:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, -1, v0
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v1, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: basic_cst_32leadingzeroes:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v0, s4, v0, -1
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v1, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v2, vcc_lo
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: basic_cst_32leadingzeroes:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    v_add_co_u32 v0, s0, v0, -1
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v1, v0
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v2, vcc_lo
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %b.zext = zext i32 %b to i64
+  %add.a.b = add i64 4294967295, %b.zext ; 0xFFFFFFFF
+  %shr = lshr i64 %add.a.b, 32
+  %add.c.shr = add i64 %c, %shr
+  ret i64 %add.c.shr
+}
+
+define i64 @basic_cst_no32leadingzeroes(i32 %b, i64 %c) {
+; VI-LABEL: basic_cst_no32leadingzeroes:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, -1, v0
+; VI-NEXT:    v_addc_u32_e64 v0, s[4:5], 0, 1, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: basic_cst_no32leadingzeroes:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, -1, v0
+; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[4:5], 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v1, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: basic_cst_no32leadingzeroes:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v0, s4, v0, -1
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v0, s4, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v1, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v2, vcc_lo
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: basic_cst_no32leadingzeroes:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    v_add_co_u32 v0, s0, v0, -1
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v0, null, 0, 1, s0
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v1, v0
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v2, vcc_lo
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %b.zext = zext i32 %b to i64
+  %add.a.b = add i64 8589934591, %b.zext ; 0x1FFFFFFFF
+  %shr = lshr i64 %add.a.b, 32
+  %add.c.shr = add i64 %c, %shr
+  ret i64 %add.c.shr
+}
+
+define <3 x i32> @add_i96(<3 x i32> %0, <3 x i32> %1) #0 {
+; VI-LABEL: add_i96:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v4, v1
+; VI-NEXT:    v_addc_u32_e64 v4, s[4:5], 0, 0, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v3, v0
+; VI-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
+; VI-NEXT:    v_add_u32_e32 v2, vcc, v5, v2
+; VI-NEXT:    v_add_u32_e32 v2, vcc, v2, v3
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: add_i96:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, v4, v1
+; GFX9-NEXT:    v_addc_co_u32_e64 v4, s[4:5], 0, 0, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v3, v0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, v1, v3
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
+; GFX9-NEXT:    v_add3_u32 v2, v5, v2, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: add_i96:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v0, s4, v3, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v1, s4, v4, v1
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s4, 0, 0, s4
+; GFX10-NEXT:    v_add_co_u32 v1, vcc_lo, v1, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, 0, v4, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v2, v5, v2, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: add_i96:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    v_add_co_u32 v0, s0, v3, v0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
+; GFX11-NEXT:    v_add_co_u32 v1, s0, v4, v1
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, null, 0, 0, s0
+; GFX11-NEXT:    v_add_co_u32 v1, vcc_lo, v1, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, 0, v4, vcc_lo
+; GFX11-NEXT:    v_add3_u32 v2, v5, v2, v3
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+  %3 = extractelement <3 x i32> %0, i64 0
+  %4 = zext i32 %3 to i64
+  %5 = extractelement <3 x i32> %1, i64 0
+  %6 = zext i32 %5 to i64
+  %7 = add nuw nsw i64 %6, %4
+  %8 = extractelement <3 x i32> %0, i64 1
+  %9 = zext i32 %8 to i64
+  %10 = extractelement <3 x i32> %1, i64 1
+  %11 = zext i32 %10 to i64
+  %12 = add nuw nsw i64 %11, %9
+  %13 = lshr i64 %7, 32
+  %14 = add nuw nsw i64 %12, %13
+  %15 = extractelement <3 x i32> %0, i64 2
+  %16 = extractelement <3 x i32> %1, i64 2
+  %17 = add i32 %16, %15
+  %18 = lshr i64 %14, 32
+  %19 = trunc i64 %18 to i32
+  %20 = add i32 %17, %19
+  %21 = trunc i64 %7 to i32
+  %22 = insertelement <3 x i32> undef, i32 %21, i32 0
+  %23 = trunc i64 %14 to i32
+  %24 = insertelement <3 x i32> %22, i32 %23, i32 1
+  %25 = insertelement <3 x i32> %24, i32 %20, i32 2
+  ret <3 x i32> %25
+}
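
The combine rests on a small arithmetic fact: when both addends have at least 32 leading zeros, the 64-bit sum fits in 33 bits, so (shr (add a, b), 32) is exactly the carry-out of adding the low 32 bits, i.e. the uaddo overflow bit, and the truncated i64 sum equals the i32 uaddo result (which is why the i32 truncs of the add can be redirected to the uaddo value). A minimal standalone C++ sanity check of that identity, a sketch for illustration only and not part of the patch:

#include <cassert>
#include <cstdint>

int main() {
  // Exhaustive checking is infeasible; probe the boundary values instead.
  const uint32_t Cases[] = {0u,          1u,          0x7FFFFFFFu,
                            0x80000000u, 0xFFFFFFFEu, 0xFFFFFFFFu};
  for (uint32_t A : Cases) {
    for (uint32_t B : Cases) {
      // i64 add of two zext'd i32 values, as in the pre-combine DAG.
      uint64_t Wide = (uint64_t)A + (uint64_t)B;
      // i32 uaddo: the wrapped 32-bit sum plus the carry/overflow bit.
      uint32_t Narrow = A + B;
      bool Overflow = Narrow < A;
      // (shr (add a, b), 32) == zext(uaddo(a, b).overflow)
      assert((Wide >> 32) == (Overflow ? 1u : 0u));
      // trunc(add a, b) == uaddo(a, b) value
      assert((uint32_t)Wide == Narrow);
    }
  }
  return 0;
}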