Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9962,20 +9962,38 @@
   if (SDValue RV = reassociateScalarOps(N, DCI.DAG))
     return RV;
 
-  EVT VT = N->getValueType(0);
-  if (VT != MVT::i64)
-    return SDValue();
-
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
 
   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
-  if (CRHS) {
+  SelectionDAG &DAG = DCI.DAG;
+
+  EVT VT = N->getValueType(0);
+  if (CRHS && VT == MVT::i64) {
     if (SDValue Split =
             splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
       return Split;
   }
 
+  // Make sure to apply the 64-bit constant splitting fold before trying to fold
+  // fneg-like xors into 64-bit select.
+  if (LHS.getOpcode() == ISD::SELECT && VT == MVT::i32) {
+    // This looks like an fneg, try to fold as a source modifier.
+    if (CRHS && CRHS->getAPIntValue().isSignMask() &&
+        shouldFoldFNegIntoSrc(N, LHS)) {
+      SDLoc DL(N);
+      SDValue CastLHS =
+          DAG.getNode(ISD::BITCAST, DL, MVT::f32, LHS->getOperand(1));
+      SDValue CastRHS =
+          DAG.getNode(ISD::BITCAST, DL, MVT::f32, LHS->getOperand(2));
+      SDValue FNegLHS = DAG.getNode(ISD::FNEG, DL, MVT::f32, CastLHS);
+      SDValue FNegRHS = DAG.getNode(ISD::FNEG, DL, MVT::f32, CastRHS);
+      SDValue NewSelect = DAG.getNode(ISD::SELECT, DL, MVT::f32,
+                                      LHS->getOperand(0), FNegLHS, FNegRHS);
+      return DAG.getNode(ISD::BITCAST, DL, VT, NewSelect);
+    }
+  }
+
   return SDValue();
 }
 
Index: llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -9,8 +9,7 @@
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v2, -v1, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: fneg_xor_select_i32:
@@ -18,10 +17,9 @@
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
-; GFX11-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, -v2, -v1, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %select = select i1 %cond, i32 %arg0, i32 %arg1
   %fneg = xor i32 %select, -2147483648
@@ -32,14 +30,12 @@
 ; GCN-LABEL: fneg_xor_select_v2i32:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v5, v3, vcc
+; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v4, -v2, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v1, -v5, -v3, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: fneg_xor_select_v2i32:
@@ -47,14 +43,12 @@
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_and_b32_e32 v1, 1, v1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_dual_cndmask_b32 v0, v4, v2 :: v_dual_and_b32 v1, 1, v1
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, -v4, -v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v5, v3, vcc_lo
-; GFX11-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, -v5, -v3, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %select = select <2 x i1> %cond, <2 x i32> %arg0, <2 x i32> %arg1
   %fneg = xor <2 x i32> %select, <i32 -2147483648, i32 -2147483648>
@@ -109,8 +103,7 @@
 ; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v2, vcc
-; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: fneg_xor_select_i64:
@@ -118,10 +111,10 @@
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_dual_cndmask_b32 v0, v3, v1 :: v_dual_cndmask_b32 v1, v4, v2
-; GFX11-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GFX11-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %select = select i1 %cond, i64 %arg0, i64 %arg1
   %fneg = xor i64 %select, 9223372036854775808
@@ -138,10 +131,8 @@
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v1
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v8, v4, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v9, v5, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v7, v3, vcc
-; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
-; GCN-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v1, -v7, -v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v3, -v9, -v5, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: fneg_xor_select_v2i64:
@@ -153,13 +144,10 @@
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v6, v2 :: v_dual_and_b32 v1, 1, v1
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v1
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v7, v3, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v9, v5, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, -v7, -v3, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v8, v4, s0
-; GFX11-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT:    v_xor_b32_e32 v3, 0x80000000, v3
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, -v9, -v5, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %select = select <2 x i1> %cond, <2 x i64> %arg0, <2 x i64> %arg1
   %fneg = xor <2 x i64> %select, <i64 9223372036854775808, i64 9223372036854775808>
@@ -298,7 +286,7 @@
 ; GFX7-NEXT:    v_cndmask_b32_e32 v1, v4, v2, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc
 ; GFX7-NEXT:    flat_store_dwordx2 v[5:6], v[0:1]
-; GFX7-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -311,7 +299,7 @@
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v2, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v[5:6], v[0:1], off
-; GFX9-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -323,8 +311,7 @@
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11-NEXT:    v_dual_cndmask_b32 v1, v4, v2 :: v_dual_cndmask_b32 v0, v3, v7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_xor_b32_e32 v2, 0x80000000, v1
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, -v4, -v2, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[5:6], v[0:1], off
 ; GFX11-NEXT:    v_mov_b32_e32 v1, v2
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0