Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -678,6 +678,11 @@
   /// (fsub (fmul x, y), z) -> (fmad x, y, -z)
   bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);
 
+  /// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+  ///           (fsub (fneg (fmul x, y)), z) -> (fmad (fneg x), y, (fneg z))
+  bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI,
+                                           BuildFnTy &MatchInfo);
+
 private:
   /// Given a non-indexed load or store instruction \p MI, find an offset that
   /// can be usefully and legally folded into it as a post-indexing operation.
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -810,6 +810,15 @@
                          ${info}); }]),
   (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
 
+// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+//           (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
+def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_FSUB):$root,
+         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
+                                                              ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
 // FIXME: These should use the custom predicate feature once it lands.
 def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                      undef_to_negative_one,
@@ -844,8 +853,8 @@
 def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
                                    combine_fadd_fpext_fmul_to_fmad_or_fma,
                                    combine_fadd_fma_fmul_to_fmad_or_fma,
-                                   combine_fadd_fpext_fma_fmul_to_fmad_or_fma,
-                                   combine_fsub_fmul_to_fmad_or_fma]>;
+                                   combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
+                                   combine_fsub_fneg_fmul_to_fmad_or_fma]>;
 
 def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
                                    extract_vec_elt_combines, combines_for_extload,
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -5197,6 +5197,53 @@
   return false;
 }
 
+bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
+    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
+
+  bool AllowFusionGlobally, HasFMAD, Aggressive;
+  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
+    return false;
+
+  Register LHSReg = MI.getOperand(1).getReg();
+  Register RHSReg = MI.getOperand(2).getReg();
+  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+
+  unsigned PreferredFusedOpcode =
+      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
+
+  MachineInstr *FMulMI;
+  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+  if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
+      (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
+                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
+      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
+    MatchInfo = [=, &MI](MachineIRBuilder &B) {
+      Register NegX =
+          B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
+      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
+      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+                   {NegX, FMulMI->getOperand(2).getReg(), NegZ});
+    };
+    return true;
+  }
+
+  // fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
+  if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
+      (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
+                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
+      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
+    MatchInfo = [=, &MI](MachineIRBuilder &B) {
+      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+                   {FMulMI->getOperand(1).getReg(),
+                    FMulMI->getOperand(2).getReg(), LHSReg});
+    };
+    return true;
+  }
+
+  return false;
+}
+
 bool CombinerHelper::tryCombine(MachineInstr &MI) {
   if (tryCombineCopy(MI))
     return true;
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-neg-mul.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-neg-mul.ll
@@ -0,0 +1,394 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 -fp-contract=fast < %s | FileCheck -check-prefix=GFX9-CONTRACT %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX9-DENORM %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -fp-contract=fast < %s | FileCheck -check-prefix=GFX10-CONTRACT %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX10-DENORM %s
+
+; fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+define float @test_f32_sub_ext_neg_mul(float %x, float %y, float %z) {
+; GFX9-LABEL: test_f32_sub_ext_neg_mul:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e64 v0, v0, -v1
+; GFX9-NEXT: v_sub_f32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_f32_sub_ext_neg_mul:
+; GFX9-CONTRACT: ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT: v_fma_f32 v0, v0, -v1, -v2
+; GFX9-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_f32_sub_ext_neg_mul:
+; GFX9-DENORM: ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT: v_mad_f32 v0, v0, -v1, -v2
+; GFX9-DENORM-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_f32_sub_ext_neg_mul:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mul_f32_e64 v0, v0, -v1
+; GFX10-NEXT: v_sub_f32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_f32_sub_ext_neg_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT: v_fma_f32 v0, v0, -v1, -v2
+; GFX10-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_f32_sub_ext_neg_mul:
+; GFX10-DENORM: ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT: v_mad_f32 v0, v0, -v1, -v2
+; GFX10-DENORM-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %a = fmul float %x, %y
+  %b = fneg float %a
+  %c = fsub float %b, %z
+  ret float %c
+}
+
+define half @test_f16_sub_ext_neg_mul(half %x, half %y, half %z) {
+; GFX9-LABEL: test_f16_sub_ext_neg_mul:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f16_e64 v0, v0, -v1
+; GFX9-NEXT: v_add_f16_e64 v0, v0, -v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_f16_sub_ext_neg_mul:
+; GFX9-CONTRACT: ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT: s_mov_b32 s4, 0x8000
+; GFX9-CONTRACT-NEXT: v_xor_b32_e32 v1, s4, v1
+; GFX9-CONTRACT-NEXT: v_xor_b32_e32 v2, s4, v2
+; GFX9-CONTRACT-NEXT: v_fma_f16 v0, v0, v1, v2
+; GFX9-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_f16_sub_ext_neg_mul:
+; GFX9-DENORM: ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT: v_mad_legacy_f16 v0, v0, -v1, -v2
+; GFX9-DENORM-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_f16_sub_ext_neg_mul:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mul_f16_e64 v0, v0, -v1
+; GFX10-NEXT: v_add_f16_e64 v0, v0, -v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_f16_sub_ext_neg_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT: s_mov_b32 s4, 0x8000
+; GFX10-CONTRACT-NEXT: v_xor_b32_e32 v1, s4, v1
+; GFX10-CONTRACT-NEXT: v_xor_b32_e32 v2, s4, v2
+; GFX10-CONTRACT-NEXT: v_fma_f16 v0, v0, v1, v2
+; GFX10-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_f16_sub_ext_neg_mul:
+; GFX10-DENORM: ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT: v_mul_f16_e64 v0, v0, -v1
+; GFX10-DENORM-NEXT: v_add_f16_e64 v0, v0, -v2
+; GFX10-DENORM-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %a = fmul half %x, %y
+  %b = fneg half %a
+  %c = fsub half %b, %z
+  ret half %c
+}
+
+define double @test_f64_sub_ext_neg_mul(double %x, double %y, double %z) {
+; GFX9-LABEL: test_f64_sub_ext_neg_mul:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_f64_sub_ext_neg_mul:
+; GFX9-CONTRACT: ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT: v_fma_f64 v[0:1], -v[0:1], v[2:3], -v[4:5]
+; GFX9-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_f64_sub_ext_neg_mul:
+; GFX9-DENORM: ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-DENORM-NEXT: v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX9-DENORM-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_f64_sub_ext_neg_mul:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_f64_sub_ext_neg_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT: v_fma_f64 v[0:1], -v[0:1], v[2:3], -v[4:5]
+; GFX10-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_f64_sub_ext_neg_mul:
+; GFX10-DENORM: ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-DENORM-NEXT: v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX10-DENORM-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %a = fmul double %x, %y
+  %b = fneg double %a
+  %c = fsub double %b, %z
+  ret double %c
+}
+
+; fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+define <4 x float> @test_v4f32_sub_ext_neg_mul(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
+; GFX9-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e64 v0, v0, -v4
+; GFX9-NEXT: v_mul_f32_e64 v1, v1, -v5
+; GFX9-NEXT: v_mul_f32_e64 v2, v2, -v6
+; GFX9-NEXT: v_mul_f32_e64 v3, v3, -v7
+; GFX9-NEXT: v_sub_f32_e32 v0, v0, v8
+; GFX9-NEXT: v_sub_f32_e32 v1, v1, v9
+; GFX9-NEXT: v_sub_f32_e32 v2, v2, v10
+; GFX9-NEXT: v_sub_f32_e32 v3, v3, v11
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX9-CONTRACT: ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT: v_fma_f32 v0, v0, -v4, -v8
+; GFX9-CONTRACT-NEXT: v_fma_f32 v1, v1, -v5, -v9
+; GFX9-CONTRACT-NEXT: v_fma_f32 v2, v2, -v6, -v10
+; GFX9-CONTRACT-NEXT: v_fma_f32 v3, v3, -v7, -v11
+; GFX9-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX9-DENORM: ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT: v_mad_f32 v0, v0, -v4, -v8
+; GFX9-DENORM-NEXT: v_mad_f32 v1, v1, -v5, -v9
+; GFX9-DENORM-NEXT: v_mad_f32 v2, v2, -v6, -v10
+; GFX9-DENORM-NEXT: v_mad_f32 v3, v3, -v7, -v11
+; GFX9-DENORM-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mul_f32_e64 v0, v0, -v4
+; GFX10-NEXT: v_mul_f32_e64 v1, v1, -v5
+; GFX10-NEXT: v_mul_f32_e64 v2, v2, -v6
+; GFX10-NEXT: v_mul_f32_e64 v3, v3, -v7
+; GFX10-NEXT: v_sub_f32_e32 v0, v0, v8
+; GFX10-NEXT: v_sub_f32_e32 v1, v1, v9
+; GFX10-NEXT: v_sub_f32_e32 v2, v2, v10
+; GFX10-NEXT: v_sub_f32_e32 v3, v3, v11
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT: v_fma_f32 v0, v0, -v4, -v8
+; GFX10-CONTRACT-NEXT: v_fma_f32 v1, v1, -v5, -v9
+; GFX10-CONTRACT-NEXT: v_fma_f32 v2, v2, -v6, -v10
+; GFX10-CONTRACT-NEXT: v_fma_f32 v3, v3, -v7, -v11
+; GFX10-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX10-DENORM: ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT: v_mad_f32 v0, v0, -v4, -v8
+; GFX10-DENORM-NEXT: v_mad_f32 v1, v1, -v5, -v9
+; GFX10-DENORM-NEXT: v_mad_f32 v2, v2, -v6, -v10
+; GFX10-DENORM-NEXT: v_mad_f32 v3, v3, -v7, -v11
+; GFX10-DENORM-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %a = fmul <4 x float> %x, %y
+  %b = fneg <4 x float> %a
+  %c = fsub <4 x float> %b, %z
+  ret <4 x float> %c
+}
+
+define <4 x half> @test_v4f16_sub_ext_neg_mul(<4 x half> %x, <4 x half> %y, <4 x half> %z) {
+; GFX9-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_mul_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_pk_mul_f16 v1, v1, v3 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_add_f16_e64 v2, v0, -v4
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e64 v3, v1, -v5
+; GFX9-NEXT: v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX9-NEXT: v_and_or_b32 v0, v2, v4, v0
+; GFX9-NEXT: v_and_or_b32 v1, v3, v4, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX9-CONTRACT: ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT: v_pk_fma_f16 v0, v0, v2, v4 neg_lo:[0,1,1] neg_hi:[0,1,1]
+; GFX9-CONTRACT-NEXT: v_pk_fma_f16 v1, v1, v3, v5 neg_lo:[0,1,1] neg_hi:[0,1,1]
+; GFX9-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX9-DENORM: ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v1, v1, v3 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT: v_add_f16_e64 v2, v0, -v4
+; GFX9-DENORM-NEXT: v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-DENORM-NEXT: v_add_f16_e64 v3, v1, -v5
+; GFX9-DENORM-NEXT: v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-DENORM-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX9-DENORM-NEXT: v_and_or_b32 v0, v2, v4, v0
+; GFX9-DENORM-NEXT: v_and_or_b32 v1, v3, v4, v1
+; GFX9-DENORM-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_pk_mul_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_pk_mul_f16 v1, v1, v3 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_add_f16_e64 v2, v0, -v4
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e64 v3, v1, -v5
+; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX10-NEXT: v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v0, v2, v4, v0
+; GFX10-NEXT: v_and_or_b32 v1, v3, v4, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v0, v0, v2, v4 neg_lo:[0,1,1] neg_hi:[0,1,1]
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v1, v1, v3, v5 neg_lo:[0,1,1] neg_hi:[0,1,1]
+; GFX10-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX10-DENORM: ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v1, v1, v3 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-DENORM-NEXT: v_add_f16_e64 v2, v0, -v4
+; GFX10-DENORM-NEXT: v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-DENORM-NEXT: v_add_f16_e64 v3, v1, -v5
+; GFX10-DENORM-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX10-DENORM-NEXT: v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-DENORM-NEXT: v_and_or_b32 v0, v2, v4, v0
+; GFX10-DENORM-NEXT: v_and_or_b32 v1, v3, v4, v1
+; GFX10-DENORM-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %a = fmul <4 x half> %x, %y
+  %b = fneg <4 x half> %a
+  %c = fsub <4 x half> %b, %z
+  ret <4 x half> %c
+}
+
+define <4 x double> @test_v4f64_sub_ext_neg_mul(<4 x double> %x, <4 x double> %y, <4 x double> %z) {
+; GFX9-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT: v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX9-NEXT: v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX9-NEXT: v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX9-NEXT: v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX9-NEXT: v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX9-NEXT: v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX9-NEXT: v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX9-CONTRACT: ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT: v_fma_f64 v[0:1], -v[0:1], v[8:9], -v[16:17]
+; GFX9-CONTRACT-NEXT: v_fma_f64 v[2:3], -v[2:3], v[10:11], -v[18:19]
+; GFX9-CONTRACT-NEXT: v_fma_f64 v[4:5], -v[4:5], v[12:13], -v[20:21]
+; GFX9-CONTRACT-NEXT: v_fma_f64 v[6:7], -v[6:7], v[14:15], -v[22:23]
+; GFX9-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX9-DENORM: ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-DENORM-NEXT: v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX9-DENORM-NEXT: v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX9-DENORM-NEXT: v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX9-DENORM-NEXT: v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX9-DENORM-NEXT: v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX9-DENORM-NEXT: v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX9-DENORM-NEXT: v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX9-DENORM-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT: v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX10-NEXT: v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX10-NEXT: v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX10-NEXT: v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX10-NEXT: v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX10-NEXT: v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX10-NEXT: v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT: v_fma_f64 v[0:1], -v[0:1], v[8:9], -v[16:17]
+; GFX10-CONTRACT-NEXT: v_fma_f64 v[2:3], -v[2:3], v[10:11], -v[18:19]
+; GFX10-CONTRACT-NEXT: v_fma_f64 v[4:5], -v[4:5], v[12:13], -v[20:21]
+; GFX10-CONTRACT-NEXT: v_fma_f64 v[6:7], -v[6:7], v[14:15], -v[22:23]
+; GFX10-CONTRACT-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX10-DENORM: ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-DENORM-NEXT: v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX10-DENORM-NEXT: v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX10-DENORM-NEXT: v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX10-DENORM-NEXT: v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX10-DENORM-NEXT: v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX10-DENORM-NEXT: v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX10-DENORM-NEXT: v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX10-DENORM-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %a = fmul <4 x double> %x, %y
+  %b = fneg <4 x double> %a
+  %c = fsub <4 x double> %b, %z
+  ret <4 x double> %c
+}
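
The new test file only exercises the first fold: every function negates the product on the left-hand side of the fsub. A minimal LLVM IR sketch of a function that would drive the second fold handled by matchCombineFSubFNegFMulToFMadOrFMA, (fsub x, (fneg (fmul y, z))) -> (fma y, z, x), is shown below. The function name is illustrative only, and check lines would still need to be generated with update_llc_test_checks.py.

; fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
define float @test_f32_sub_neg_mul_rhs(float %x, float %y, float %z) {
entry:
  %a = fmul float %y, %z
  %b = fneg float %a
  %c = fsub float %x, %b
  ret float %c
}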