Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -387,6 +387,15 @@
       MachineInstr &MI,
       std::tuple &MatchInfo);
 
+  /// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
+  ///           -> (fmad (fneg x), y, (fneg z))
+  bool matchCombineFSubFNegFMulToFMadOrFMA(
+      MachineInstr &MI,
+      std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo);
+  bool applyCombineFSubFNegFMulToFMadOrFMA(
+      MachineInstr &MI,
+      std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo);
+
   /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
   bool matchCombineTruncOfExt(MachineInstr &MI,
                               std::pair<Register, unsigned> &MatchInfo);
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -637,6 +637,18 @@
   (apply [{ return Helper.applyCombineFSubFMulToFMadOrFMA(*${root},
                                                           ${info}); }])>;
+
+// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
+//           (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
+def combine_fsub_fneg_fmul_to_fmad_or_fma_info :
+    GIDefMatchData<"std::tuple<Register, Register, Register, unsigned, bool>">;
+def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
+  (defs root:$root, combine_fsub_fneg_fmul_to_fmad_or_fma_info:$info),
+  (match (wip_match_opcode G_FSUB):$root,
+         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
+                                                              ${info}); }]),
+  (apply [{ return Helper.applyCombineFSubFNegFMulToFMadOrFMA(*${root},
+                                                              ${info}); }])>;
 
 // Currently only the one combine above.
 def insert_vec_elt_combines : GICombineGroup<
   [combine_insert_vec_elts_build_vector]>;
@@ -721,7 +733,7 @@
     div_rem_to_divrem, funnel_shift_combines, combine_fadd_fmul_to_fmad_or_fma,
     combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
     combine_fadd_fpext_fma_fmul_to_fmad_or_fma,
-    combine_fsub_fmul_to_fmad_or_fma]>;
+    combine_fsub_fmul_to_fmad_or_fma, combine_fsub_fneg_fmul_to_fmad_or_fma]>;
 
 // A combine group used to for prelegalizer combiners at -O0. The combines in
 // this group have been selected based on experiments to balance code size and
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4500,6 +4500,97 @@
   return true;
 }
 
+bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
+    MachineInstr &MI,
+    std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
+
+  auto *MF = MI.getParent()->getParent();
+  const auto &TLI = *MF->getSubtarget().getTargetLowering();
+  const TargetOptions &Options = MF->getTarget().Options;
+  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
+  MachineInstr *MI0 = MRI.getVRegDef(MI.getOperand(1).getReg());
+  MachineInstr *MI1 = MRI.getVRegDef(MI.getOperand(2).getReg());
+
+  bool LegalOperations = LI;
+  // Floating-point multiply-add with intermediate rounding.
+  bool HasFMAD = (LegalOperations && TLI.isFMADLegal(MI, DstType));
+  // Floating-point multiply-add without intermediate rounding.
+  bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
+                isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
+
+  // No valid opcode, do not combine.
+  if (!HasFMAD && !HasFMA)
+    return false;
+
+  bool CanFuse =
+      Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmContract);
+  bool AllowFusionGlobally =
+      (Options.AllowFPOpFusion == FPOpFusion::Fast || CanFuse || HasFMAD);
+
+  // If the addition is not contractable, do not combine.
+  if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
+    return false;
+
+  unsigned PreferredFusedOpcode =
+      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
+  bool Aggressive = TLI.enableAggressiveFMAFusion(DstType);
+
+  Register NegReg;
+  // fold (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
+  if (mi_match(MI0->getOperand(0).getReg(), MRI, m_GFNeg(m_Reg(NegReg))) &&
+      (Aggressive || (MRI.hasOneNonDBGUse(MI0->getOperand(0).getReg()) &&
+                      MRI.hasOneNonDBGUse(MI0->getOperand(1).getReg())))) {
+    MachineInstr *MI00 = MRI.getVRegDef(NegReg);
+    if (isContractableFMul(*MI00, AllowFusionGlobally)) {
+      MatchInfo = {MI00->getOperand(1).getReg(), MI00->getOperand(2).getReg(),
+                   MI1->getOperand(0).getReg(), PreferredFusedOpcode, true};
+      return true;
+    }
+  }
+
+  // fold (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
+  if (mi_match(MI1->getOperand(0).getReg(), MRI, m_GFNeg(m_Reg(NegReg))) &&
+      (Aggressive || (MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg()) &&
+                      MRI.hasOneNonDBGUse(MI1->getOperand(1).getReg())))) {
+    MachineInstr *MI10 = MRI.getVRegDef(NegReg);
+    if (isContractableFMul(*MI10, AllowFusionGlobally)) {
+      MatchInfo = {MI0->getOperand(0).getReg(), MI10->getOperand(1).getReg(),
+                   MI10->getOperand(2).getReg(), PreferredFusedOpcode, false};
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool CombinerHelper::applyCombineFSubFNegFMulToFMadOrFMA(
+    MachineInstr &MI,
+    std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo) {
+  Register Src1, Src2, Src3;
+  unsigned PreferredFusedOpcode;
+  bool HasFirstFMUL;
+  std::tie(Src1, Src2, Src3, PreferredFusedOpcode, HasFirstFMUL) = MatchInfo;
+
+  Builder.setInstrAndDebugLoc(MI);
+
+  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+  if (HasFirstFMUL) {
+    Src1 = Builder.buildFNeg(DstTy, Src1).getReg(0);
+    Src3 = Builder.buildFNeg(DstTy, Src3).getReg(0);
+  } else {
+    Register X = Src1, Y = Src2, Z = Src3;
+    Src1 = Y;
+    Src2 = Z;
+    Src3 = X;
+  }
+
+  Builder.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+                     {Src1, Src2, Src3});
+  MI.eraseFromParent();
+  return true;
+}
+
 bool CombinerHelper::tryCombine(MachineInstr &MI) {
   if (tryCombineCopy(MI))
     return true;
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-neg-mul.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-neg-mul.ll
@@ -0,0 +1,407 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 -fp-contract=fast < %s | FileCheck -check-prefix=GFX9-CONTRACT %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX9-DENORM %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -fp-contract=fast < %s | FileCheck -check-prefix=GFX10-CONTRACT %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX10-DENORM %s
+
+; fold (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
+define float @test_f32_sub_ext_neg_mul(float %x, float %y, float %z) {
+; GFX9-LABEL: test_f32_sub_ext_neg_mul:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GFX9-NEXT:    v_sub_f32_e64 v0, -v0, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_f32_sub_ext_neg_mul:
+; GFX9-CONTRACT:       ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT:    v_fma_f32 v0, -v0, v1, -v2
+; GFX9-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_f32_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, -v0, v1, -v2
+; GFX9-DENORM-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_f32_sub_ext_neg_mul:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GFX10-NEXT:    v_sub_f32_e64 v0, -v0, v2
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_f32_sub_ext_neg_mul:
+; GFX10-CONTRACT:       ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT:    v_fma_f32 v0, -v0, v1, -v2
+; GFX10-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_f32_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT:    v_mad_f32 v0, -v0, v1, -v2
+; GFX10-DENORM-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = fmul float %x, %y
+  %b = fneg float %a
+  %c = fsub float %b, %z
+  ret float %c
+}
+
+define half @test_f16_sub_ext_neg_mul(half %x, half %y, half %z) {
+; GFX9-LABEL: test_f16_sub_ext_neg_mul:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_f16_e32 v0, v0, v1
+; GFX9-NEXT:    v_add_f16_e64 v0, -v0, -v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_f16_sub_ext_neg_mul:
+; GFX9-CONTRACT:       ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT:    s_mov_b32 s4, 0x8000
+; GFX9-CONTRACT-NEXT:    v_xor_b32_e32 v0, s4, v0
+; GFX9-CONTRACT-NEXT:    v_xor_b32_e32 v2, s4, v2
+; GFX9-CONTRACT-NEXT:    v_fma_f16 v0, v0, v1, v2
+; GFX9-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_f16_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT:    v_mul_f16_e32 v0, v0, v1
+; GFX9-DENORM-NEXT:    v_add_f16_e64 v0, -v0, -v2
+; GFX9-DENORM-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_f16_sub_ext_neg_mul:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_f16_e32 v0, v0, v1
+; GFX10-NEXT:    v_add_f16_e64 v0, -v0, -v2
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_f16_sub_ext_neg_mul:
+; GFX10-CONTRACT:       ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT:    s_mov_b32 s4, 0x8000
+; GFX10-CONTRACT-NEXT:    v_xor_b32_e32 v0, s4, v0
+; GFX10-CONTRACT-NEXT:    v_xor_b32_e32 v2, s4, v2
+; GFX10-CONTRACT-NEXT:    v_fma_f16 v0, v0, v1, v2
+; GFX10-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_f16_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT:    v_mul_f16_e32 v0, v0, v1
+; GFX10-DENORM-NEXT:    v_add_f16_e64 v0, -v0, -v2
+; GFX10-DENORM-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = fmul half %x, %y
+  %b = fneg half %a
+  %c = fsub half %b, %z
+  ret half %c
+}
+
+define double @test_f64_sub_ext_neg_mul(double %x, double %y, double %z) {
+; GFX9-LABEL: test_f64_sub_ext_neg_mul:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_f64_sub_ext_neg_mul:
+; GFX9-CONTRACT:       ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT:    v_fma_f64 v[0:1], -v[0:1], v[2:3], -v[4:5]
+; GFX9-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_f64_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT:    v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-DENORM-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX9-DENORM-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_f64_sub_ext_neg_mul:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_f64_sub_ext_neg_mul:
+; GFX10-CONTRACT:       ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT:    v_fma_f64 v[0:1], -v[0:1], v[2:3], -v[4:5]
+; GFX10-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_f64_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT:    v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-DENORM-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[4:5]
+; GFX10-DENORM-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = fmul double %x, %y
+  %b = fneg double %a
+  %c = fsub double %b, %z
+  ret double %c
+}
+
+; fold (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
+define <4 x float> @test_v4f32_sub_ext_neg_mul(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
+; GFX9-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v4
+; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v5
+; GFX9-NEXT:    v_mul_f32_e32 v2, v2, v6
+; GFX9-NEXT:    v_mul_f32_e32 v3, v3, v7
+; GFX9-NEXT:    v_sub_f32_e64 v0, -v0, v8
+; GFX9-NEXT:    v_sub_f32_e64 v1, -v1, v9
+; GFX9-NEXT:    v_sub_f32_e64 v2, -v2, v10
+; GFX9-NEXT:    v_sub_f32_e64 v3, -v3, v11
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX9-CONTRACT:       ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT:    v_fma_f32 v0, -v0, v4, -v8
+; GFX9-CONTRACT-NEXT:    v_fma_f32 v1, -v1, v5, -v9
+; GFX9-CONTRACT-NEXT:    v_fma_f32 v2, -v2, v6, -v10
+; GFX9-CONTRACT-NEXT:    v_fma_f32 v3, -v3, v7, -v11
+; GFX9-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, -v0, v4, -v8
+; GFX9-DENORM-NEXT:    v_mad_f32 v1, -v1, v5, -v9
+; GFX9-DENORM-NEXT:    v_mad_f32 v2, -v2, v6, -v10
+; GFX9-DENORM-NEXT:    v_mad_f32 v3, -v3, v7, -v11
+; GFX9-DENORM-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v4
+; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v5
+; GFX10-NEXT:    v_mul_f32_e32 v2, v2, v6
+; GFX10-NEXT:    v_mul_f32_e32 v3, v3, v7
+; GFX10-NEXT:    v_sub_f32_e64 v0, -v0, v8
+; GFX10-NEXT:    v_sub_f32_e64 v1, -v1, v9
+; GFX10-NEXT:    v_sub_f32_e64 v2, -v2, v10
+; GFX10-NEXT:    v_sub_f32_e64 v3, -v3, v11
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX10-CONTRACT:       ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT:    v_fma_f32 v0, -v0, v4, -v8
+; GFX10-CONTRACT-NEXT:    v_fma_f32 v1, -v1, v5, -v9
+; GFX10-CONTRACT-NEXT:    v_fma_f32 v2, -v2, v6, -v10
+; GFX10-CONTRACT-NEXT:    v_fma_f32 v3, -v3, v7, -v11
+; GFX10-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_v4f32_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT:    v_mad_f32 v0, -v0, v4, -v8
+; GFX10-DENORM-NEXT:    v_mad_f32 v1, -v1, v5, -v9
+; GFX10-DENORM-NEXT:    v_mad_f32 v2, -v2, v6, -v10
+; GFX10-DENORM-NEXT:    v_mad_f32 v3, -v3, v7, -v11
+; GFX10-DENORM-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = fmul <4 x float> %x, %y
+  %b = fneg <4 x float> %a
+  %c = fsub <4 x float> %b, %z
+  ret <4 x float> %c
+}
+
+define <4 x half> @test_v4f16_sub_ext_neg_mul(<4 x half> %x, <4 x half> %y, <4 x half> %z) {
+; GFX9-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_mul_f16 v0, v0, v2
+; GFX9-NEXT:    s_mov_b32 s4, 0x80008000
+; GFX9-NEXT:    v_pk_mul_f16 v1, v1, v3
+; GFX9-NEXT:    v_xor_b32_e32 v0, s4, v0
+; GFX9-NEXT:    v_xor_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_add_f16_e64 v2, v0, -v4
+; GFX9-NEXT:    v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT:    v_add_f16_e64 v3, v1, -v5
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0xffff
+; GFX9-NEXT:    v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT:    v_and_or_b32 v0, v2, v4, v0
+; GFX9-NEXT:    v_and_or_b32 v1, v3, v4, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX9-CONTRACT:       ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT:    v_pk_fma_f16 v0, v0, v2, v4 neg_lo:[1,0,1] neg_hi:[1,0,1]
+; GFX9-CONTRACT-NEXT:    v_pk_fma_f16 v1, v1, v3, v5 neg_lo:[1,0,1] neg_hi:[1,0,1]
+; GFX9-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v0, v0, v2
+; GFX9-DENORM-NEXT:    s_mov_b32 s4, 0x80008000
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v1, v1, v3
+; GFX9-DENORM-NEXT:    v_xor_b32_e32 v0, s4, v0
+; GFX9-DENORM-NEXT:    v_xor_b32_e32 v1, s4, v1
+; GFX9-DENORM-NEXT:    v_add_f16_e64 v2, v0, -v4
+; GFX9-DENORM-NEXT:    v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_add_f16_e64 v3, v1, -v5
+; GFX9-DENORM-NEXT:    v_mov_b32_e32 v4, 0xffff
+; GFX9-DENORM-NEXT:    v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_and_or_b32 v0, v2, v4, v0
+; GFX9-DENORM-NEXT:    v_and_or_b32 v1, v3, v4, v1
+; GFX9-DENORM-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_mul_f16 v0, v0, v2
+; GFX10-NEXT:    v_pk_mul_f16 v1, v1, v3
+; GFX10-NEXT:    s_mov_b32 s4, 0x80008000
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX10-NEXT:    v_xor_b32_e32 v0, s4, v0
+; GFX10-NEXT:    v_xor_b32_e32 v1, s4, v1
+; GFX10-NEXT:    v_add_f16_e64 v3, v0, -v4
+; GFX10-NEXT:    v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT:    v_add_f16_e64 v4, v1, -v5
+; GFX10-NEXT:    v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT:    v_and_or_b32 v0, v3, v2, v0
+; GFX10-NEXT:    v_and_or_b32 v1, v4, v2, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX10-CONTRACT:       ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT:    v_pk_fma_f16 v0, v0, v2, v4 neg_lo:[1,0,1] neg_hi:[1,0,1]
+; GFX10-CONTRACT-NEXT:    v_pk_fma_f16 v1, v1, v3, v5 neg_lo:[1,0,1] neg_hi:[1,0,1]
+; GFX10-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_v4f16_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT:    v_pk_mul_f16 v0, v0, v2
+; GFX10-DENORM-NEXT:    v_pk_mul_f16 v1, v1, v3
+; GFX10-DENORM-NEXT:    s_mov_b32 s4, 0x80008000
+; GFX10-DENORM-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v0, s4, v0
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v1, s4, v1
+; GFX10-DENORM-NEXT:    v_add_f16_e64 v3, v0, -v4
+; GFX10-DENORM-NEXT:    v_add_f16_sdwa v0, v0, -v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_add_f16_e64 v4, v1, -v5
+; GFX10-DENORM-NEXT:    v_add_f16_sdwa v1, v1, -v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_and_or_b32 v0, v3, v2, v0
+; GFX10-DENORM-NEXT:    v_and_or_b32 v1, v4, v2, v1
+; GFX10-DENORM-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = fmul <4 x half> %x, %y
+  %b = fneg <4 x half> %a
+  %c = fsub <4 x half> %b, %z
+  ret <4 x half> %c
+}
+
+define <4 x double> @test_v4f64_sub_ext_neg_mul(<4 x double> %x, <4 x double> %y, <4 x double> %z) {
+; GFX9-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT:    v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX9-NEXT:    v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX9-NEXT:    v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX9-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX9-NEXT:    v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX9-NEXT:    v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX9-NEXT:    v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-CONTRACT-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX9-CONTRACT:       ; %bb.0: ; %entry
+; GFX9-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-CONTRACT-NEXT:    v_fma_f64 v[0:1], -v[0:1], v[8:9], -v[16:17]
+; GFX9-CONTRACT-NEXT:    v_fma_f64 v[2:3], -v[2:3], v[10:11], -v[18:19]
+; GFX9-CONTRACT-NEXT:    v_fma_f64 v[4:5], -v[4:5], v[12:13], -v[20:21]
+; GFX9-CONTRACT-NEXT:    v_fma_f64 v[6:7], -v[6:7], v[14:15], -v[22:23]
+; GFX9-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-DENORM-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-DENORM-NEXT:    v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-DENORM-NEXT:    v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX9-DENORM-NEXT:    v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX9-DENORM-NEXT:    v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX9-DENORM-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX9-DENORM-NEXT:    v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX9-DENORM-NEXT:    v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX9-DENORM-NEXT:    v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX9-DENORM-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT:    v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX10-NEXT:    v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX10-NEXT:    v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX10-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX10-NEXT:    v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX10-NEXT:    v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX10-NEXT:    v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-CONTRACT-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX10-CONTRACT:       ; %bb.0: ; %entry
+; GFX10-CONTRACT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-CONTRACT-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-CONTRACT-NEXT:    v_fma_f64 v[0:1], -v[0:1], v[8:9], -v[16:17]
+; GFX10-CONTRACT-NEXT:    v_fma_f64 v[2:3], -v[2:3], v[10:11], -v[18:19]
+; GFX10-CONTRACT-NEXT:    v_fma_f64 v[4:5], -v[4:5], v[12:13], -v[20:21]
+; GFX10-CONTRACT-NEXT:    v_fma_f64 v[6:7], -v[6:7], v[14:15], -v[22:23]
+; GFX10-CONTRACT-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-DENORM-LABEL: test_v4f64_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-DENORM-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-DENORM-NEXT:    v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-DENORM-NEXT:    v_mul_f64 v[2:3], v[2:3], v[10:11]
+; GFX10-DENORM-NEXT:    v_mul_f64 v[4:5], v[4:5], v[12:13]
+; GFX10-DENORM-NEXT:    v_mul_f64 v[6:7], v[6:7], v[14:15]
+; GFX10-DENORM-NEXT:    v_add_f64 v[0:1], -v[0:1], -v[16:17]
+; GFX10-DENORM-NEXT:    v_add_f64 v[2:3], -v[2:3], -v[18:19]
+; GFX10-DENORM-NEXT:    v_add_f64 v[4:5], -v[4:5], -v[20:21]
+; GFX10-DENORM-NEXT:    v_add_f64 v[6:7], -v[6:7], -v[22:23]
+; GFX10-DENORM-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = fmul <4 x double> %x, %y
+  %b = fneg <4 x double> %a
+  %c = fsub <4 x double> %b, %z
+  ret <4 x double> %c
+}
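
The tests above only exercise the (fsub (fneg (fmul x, y)), z) shape. The second fold handled by matchCombineFSubFNegFMulToFMadOrFMA, (fsub x, (fneg (fmul y, z))) -> (fma y, z, x), is not covered by the generated checks. A minimal IR sketch of that shape is given below; the function name is hypothetical and the snippet is illustrative rather than part of the patch. With -fp-contract=fast on gfx900 it would be expected to select a single v_fma_f32 with no source modifiers.

; Illustrative only: exercises (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
define float @test_f32_sub_neg_mul_rhs(float %x, float %y, float %z) {
entry:
  %a = fmul float %y, %z
  %b = fneg float %a
  %c = fsub float %x, %b
  ret float %c
}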