Index: lib/Target/AMDGPU/AMDGPUISelLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -84,6 +84,7 @@
   SDValue performCtlzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                              SDValue RHS, DAGCombinerInfo &DCI) const;
   SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
 
   static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);
 
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -480,6 +480,7 @@
   setTargetDAGCombine(ISD::STORE);
   setTargetDAGCombine(ISD::FADD);
   setTargetDAGCombine(ISD::FSUB);
+  setTargetDAGCombine(ISD::FNEG);
 }
 
 //===----------------------------------------------------------------------===//
@@ -2734,6 +2735,52 @@
   return performCtlzCombine(SDLoc(N), Cond, True, False, DCI);
 }
 
+SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
+                                                 DAGCombinerInfo &DCI) const {
+  if (DCI.isBeforeLegalizeOps())
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue N0 = N->getOperand(0);
+  EVT VT = N->getValueType(0);
+
+  unsigned Opc = N0.getOpcode();
+
+  // If the input has multiple uses and we can either fold the negate down, or
+  // the other uses cannot, give up. This both prevents unprofitable
+  // transformations and infinite loops: we won't repeatedly try to fold around
+  // a negate that has no 'good' form.
+  //
+  // TODO: Check users can fold
+  if ((Opc == ISD::FADD || Opc == ISD::FMUL ||
+       Opc == ISD::FMA || Opc == ISD::FMAD) &&
+      !N0.hasOneUse())
+    return SDValue();
+
+  switch (Opc) {
+  case AMDGPUISD::RCP:
+  case AMDGPUISD::RCP_LEGACY: {
+    SDLoc SL(N);
+    SDValue Src = N0.getOperand(0);
+
+    // (fneg (rcp (fneg x))) -> (rcp x)
+    if (Src.getOpcode() == ISD::FNEG) {
+      return DAG.getNode(Opc, SL, VT, Src.getOperand(0));
+    }
+
+    // (fneg (rcp x)) -> (rcp (fneg x))
+    if (N0.hasOneUse()) { // TODO: Check users
+      SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Src);
+      return DAG.getNode(Opc, SL, VT, Neg);
+    }
+
+    return SDValue();
+  }
+  default:
+    return SDValue();
+  }
+}
+
 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -2839,6 +2886,8 @@
     return performMulLoHi24Combine(N, DCI);
   case ISD::SELECT:
     return performSelectCombine(N, DCI);
+  case ISD::FNEG:
+    return performFNegCombine(N, DCI);
   case AMDGPUISD::BFE_I32:
   case AMDGPUISD::BFE_U32: {
     assert(!N->getValueType(0).isVector() &&
Index: test/CodeGen/AMDGPU/fneg-combines.ll
===================================================================
--- test/CodeGen/AMDGPU/fneg-combines.ll
+++ test/CodeGen/AMDGPU/fneg-combines.ll
@@ -791,9 +791,110 @@
   ret void
 }
 
+; --------------------------------------------------------------------------------
+; rcp tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_rcp_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_rcp_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define void @v_fneg_rcp_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile float, float addrspace(1)* %a.gep
+  %rcp = call float @llvm.amdgcn.rcp.f32(float %a)
+  %fneg = fsub float -0.000000e+00, %rcp
+  store float %fneg, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_rcp_fneg_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define void @v_fneg_rcp_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile float, float addrspace(1)* %a.gep
+  %fneg.a = fsub float -0.000000e+00, %a
+  %rcp = call float @llvm.amdgcn.rcp.f32(float %fneg.a)
+  %fneg = fsub float -0.000000e+00, %rcp
+  store float %fneg, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_rcp_store_use_fneg_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN-DAG: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
+; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
+; GCN: buffer_store_dword [[RESULT]]
+; GCN: buffer_store_dword [[NEG_A]]
+define void @v_fneg_rcp_store_use_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile float, float addrspace(1)* %a.gep
+  %fneg.a = fsub float -0.000000e+00, %a
+  %rcp = call float @llvm.amdgcn.rcp.f32(float %fneg.a)
+  %fneg = fsub float -0.000000e+00, %rcp
+  store volatile float %fneg, float addrspace(1)* %out.gep
+  store volatile float %fneg.a, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_rcp_multi_use_fneg_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN-DAG: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
+; GCN-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
+; GCN: buffer_store_dword [[RESULT]]
+; GCN: buffer_store_dword [[MUL]]
+define void @v_fneg_rcp_multi_use_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float %c) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile float, float addrspace(1)* %a.gep
+  %fneg.a = fsub float -0.000000e+00, %a
+  %rcp = call float @llvm.amdgcn.rcp.f32(float %fneg.a)
+  %fneg = fsub float -0.000000e+00, %rcp
+  %use1 = fmul float %fneg.a, %c
+  store volatile float %fneg, float addrspace(1)* %out.gep
+  store volatile float %use1, float addrspace(1)* undef
+  ret void
+}
+
+; --------------------------------------------------------------------------------
+; rcp_legacy tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_rcp_legacy_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_rcp_legacy_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define void @v_fneg_rcp_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile float, float addrspace(1)* %a.gep
+  %rcp = call float @llvm.amdgcn.rcp.legacy(float %a)
+  %fneg = fsub float -0.000000e+00, %rcp
+  store float %fneg, float addrspace(1)* %out.gep
+  ret void
+}
+
+
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 declare float @llvm.fma.f32(float, float, float) #1
 declare float @llvm.fmuladd.f32(float, float, float) #1
+declare float @llvm.amdgcn.rcp.f32(float) #1
+declare float @llvm.amdgcn.rcp.legacy(float) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }