Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -410,6 +410,16 @@
       MachineInstr &MI,
       std::tuple &MatchInfo);
+  /// Transform (fsub (fpext (fneg (fmul x, y))), z)
+  ///        -> (fneg (fma (fpext x), (fpext y), z))
+  ///        -> (fneg (fmad (fpext x), (fpext y), z))
+  bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(
+      MachineInstr &MI,
+      std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo);
+  bool applyCombineFSubFpExtFNegFMulToFMadOrFMA(
+      MachineInstr &MI,
+      std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo);
+
   /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
   bool matchCombineTruncOfExt(MachineInstr &MI,
                               std::pair<Register, unsigned> &MatchInfo);
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -680,6 +680,18 @@
   (apply [{ return Helper.applyCombineFSubFpExtFMulToFMadOrFMA(*${root}, ${info}); }])>;
 
+// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
+//           (fneg (fma (fpext x), (fpext y), z))
+def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma_info :
+  GIDefMatchData<"std::tuple<Register, Register, Register, unsigned, bool>">;
+def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
+  (defs root:$root, combine_fsub_fpext_fneg_fmul_to_fmad_or_fma_info:$info),
+  (match (wip_match_opcode G_FSUB):$root,
+         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
+                *${root}, ${info}); }]),
+  (apply [{ return Helper.applyCombineFSubFpExtFNegFMulToFMadOrFMA(
+                *${root}, ${info}); }])>;
+
 // Currently only the one combine above.
 def insert_vec_elt_combines : GICombineGroup<
                               [combine_insert_vec_elts_build_vector]>;
@@ -809,11 +821,12 @@
     constant_fold, combine_fadd_fmul_to_fmad_or_fma,
     combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
     combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
-    combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma]>;
+    combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
+    combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;
 
 // A combine group used to for prelegalizer combiners at -O0. The combines in
 // this group have been selected based on experiments to balance code size and
 // compile time performance.
 def optnone_combines : GICombineGroup<[trivial_combines,
                                        ptr_add_immed_chain, combines_for_extload,
-                                       not_cmp_fold, opt_brcond_by_inverting_cond]>;
\ No newline at end of file
+                                       not_cmp_fold, opt_brcond_by_inverting_cond]>;
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -5071,6 +5071,137 @@
   return true;
 }
 
+bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
+    MachineInstr &MI,
+    std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
+
+  auto *MF = MI.getParent()->getParent();
+  const auto &TLI = *MF->getSubtarget().getTargetLowering();
+  const TargetOptions &Options = MF->getTarget().Options;
+  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
+  MachineInstr *MI0 = MRI.getVRegDef(MI.getOperand(1).getReg());
+  MachineInstr *MI1 = MRI.getVRegDef(MI.getOperand(2).getReg());
+
+  bool LegalOperations = LI;
+  // Floating-point multiply-add with intermediate rounding.
+  bool HasFMAD = (LegalOperations && TLI.isFMADLegal(MI, DstType));
+  // Floating-point multiply-add without intermediate rounding.
+  bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
+                isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
+
+  // No valid opcode, do not combine.
+  if (!HasFMAD && !HasFMA)
+    return false;
+
+  bool CanFuse =
+      Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmContract);
+  bool AllowFusionGlobally =
+      (Options.AllowFPOpFusion == FPOpFusion::Fast || CanFuse || HasFMAD);
+
+  // If the addition is not contractable, do not combine.
+  if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
+    return false;
+
+  unsigned PreferredFusedOpcode =
+      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
+
+  Register FpExtReg, NegReg;
+  // fold (fsub (fpext (fneg (fmul x, y))), z) ->
+  //      (fneg (fma (fpext x), (fpext y), z))
+  if (mi_match(MI0->getOperand(0).getReg(), MRI, m_GFPExt(m_Reg(FpExtReg)))) {
+    if (mi_match(FpExtReg, MRI, m_GFNeg(m_Reg(NegReg))) &&
+        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                            MRI.getType(FpExtReg))) {
+      MachineInstr *MI000 = MRI.getVRegDef(NegReg);
+      if (isContractableFMul(*MI000, AllowFusionGlobally)) {
+        MatchInfo = {MI000->getOperand(1).getReg(),
+                     MI000->getOperand(2).getReg(), MI1->getOperand(0).getReg(),
+                     PreferredFusedOpcode, true};
+        return true;
+      }
+    }
+  }
+
+  // fold (fsub (fneg (fpext (fmul x, y))), z) ->
+  //      (fneg (fma (fpext x), (fpext y), z))
+  if (mi_match(MI0->getOperand(0).getReg(), MRI, m_GFNeg(m_Reg(NegReg)))) {
+    if (mi_match(NegReg, MRI, m_GFPExt(m_Reg(FpExtReg)))) {
+      MachineInstr *MI000 = MRI.getVRegDef(FpExtReg);
+      if (isContractableFMul(*MI000, AllowFusionGlobally) &&
+          TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                              MRI.getType(FpExtReg))) {
+        MatchInfo = {MI000->getOperand(1).getReg(),
+                     MI000->getOperand(2).getReg(), MI1->getOperand(0).getReg(),
+                     PreferredFusedOpcode, true};
+        return true;
+      }
+    }
+  }
+
+  // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+  if (mi_match(MI1->getOperand(0).getReg(), MRI, m_GFPExt(m_Reg(FpExtReg)))) {
+    if (mi_match(FpExtReg, MRI, m_GFNeg(m_Reg(NegReg))) &&
+        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                            MRI.getType(FpExtReg))) {
+      MachineInstr *MI100 = MRI.getVRegDef(NegReg);
+      if (isContractableFMul(*MI100, AllowFusionGlobally)) {
+        MatchInfo = {MI100->getOperand(1).getReg(),
+                     MI100->getOperand(2).getReg(), MI0->getOperand(0).getReg(),
+                     PreferredFusedOpcode, false};
+        return true;
+      }
+    }
+  }
+
+  // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+  if (mi_match(MI1->getOperand(0).getReg(), MRI, m_GFNeg(m_Reg(NegReg)))) {
+    if (mi_match(NegReg, MRI, m_GFPExt(m_Reg(FpExtReg)))) {
+      MachineInstr *MI100 = MRI.getVRegDef(FpExtReg);
+      if (isContractableFMul(*MI100, AllowFusionGlobally) &&
+          TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                              MRI.getType(FpExtReg))) {
+        MatchInfo = {MI100->getOperand(1).getReg(),
+                     MI100->getOperand(2).getReg(), MI0->getOperand(0).getReg(),
+                     PreferredFusedOpcode, false};
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+bool CombinerHelper::applyCombineFSubFpExtFNegFMulToFMadOrFMA(
+    MachineInstr &MI,
+    std::tuple<Register, Register, Register, unsigned, bool> &MatchInfo) {
+  Register Src1, Src2, Src3;
+  unsigned PreferredFusedOpcode;
+  bool HasFirstFMUL;
+  std::tie(Src1, Src2, Src3, PreferredFusedOpcode, HasFirstFMUL) = MatchInfo;
+
+  Builder.setInstrAndDebugLoc(MI);
+  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+  if (HasFirstFMUL) {
+    Src1 = Builder.buildFPExt(DstTy, Src1).getReg(0);
+    Src1 = Builder.buildFNeg(DstTy, Src1).getReg(0);
+    Src2 = Builder.buildFPExt(DstTy, Src2).getReg(0);
+    Src3 = Builder.buildFNeg(DstTy, Src3).getReg(0);
+
+    Builder.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+                       {Src1, Src2, Src3});
+  } else {
+    Src1 = Builder.buildFPExt(DstTy, Src1).getReg(0);
+    Src2 = Builder.buildFPExt(DstTy, Src2).getReg(0);
+
+    Builder.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+                       {Src1, Src2, Src3});
+  }
+
+  MI.eraseFromParent();
+  return true;
+}
+
 bool CombinerHelper::tryCombine(MachineInstr &MI) {
   if (tryCombineCopy(MI))
     return true;
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-ext-neg-mul.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-ext-neg-mul.ll
@@ -0,0 +1,259 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX9-DENORM %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX10-DENORM %s
+
+; fold (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs float @test_f16_to_f32_sub_ext_neg_mul(half %x, half %y, float %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, -v0, v1, -v2
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v0, v1, -v2
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %x, %y
+  %b = fneg half %a
+  %c = fpext half %b to float
+  %d = fsub fast float %c, %z
+  ret float %d
+}
+
+; fold (fsub (fneg (fpext (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs float @test_f16_to_f32_sub_neg_ext_mul(half %x, half %y, float %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, -v0, v1, -v2
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v0, v1, -v2
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %x, %y
+  %b = fpext half %a to float
+  %c = fneg float %b
+  %d = fsub fast float %c, %z
+  ret float %d
+}
+
+
+; fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs float @test_f16_to_f32_sub_ext_neg_mul2(float %x, half %y, half %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; GFX9-DENORM-NEXT:    v_mac_f32_e32 v0, v1, v2
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v0, v1, v2
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %y, %z
+  %b = fneg half %a
+  %c = fpext half %b to float
+  %d = fsub fast float %x, %c
+  ret float %d
+}
+
+; fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs float @test_f16_to_f32_sub_neg_ext_mul2(float %x, half %y, half %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; GFX9-DENORM-NEXT:    v_mac_f32_e32 v0, v1, v2
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v0, v1, v2
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %y, %z
+  %b = fpext half %a to float
+  %c = fneg float %b
+  %d = fsub fast float %x, %c
+  ret float %d
+}
+
+; fold (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_ext_neg_mul(<4 x half> %x, <4 x half> %y, <4 x float> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v0, v0, v2
+; GFX9-DENORM-NEXT:    s_mov_b32 s0, 0x80008000
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v1, v1, v3
+; GFX9-DENORM-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX9-DENORM-NEXT:    v_xor_b32_e32 v1, s0, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v0, v2, v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v1, v3, v5
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v2, v8, v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v3, v9, v7
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v0
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v2
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v3
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v8, v0, -v4
+; GFX10-DENORM-NEXT:    v_fma_f32 v1, -v9, v1, -v5
+; GFX10-DENORM-NEXT:    v_fma_f32 v2, -v10, v2, -v6
+; GFX10-DENORM-NEXT:    v_fma_f32 v3, -v11, v3, -v7
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %x, %y
+  %b = fneg <4 x half> %a
+  %c = fpext <4 x half> %b to <4 x float>
+  %d = fsub fast <4 x float> %c, %z
+  ret <4 x float> %d
+}
+
+; fold (fsub (fneg (fpext (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_neg_ext_mul(<4 x half> %x, <4 x half> %y, <4 x float> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v0, v0, v2
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v1, v1, v3
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v0, -v2, v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v1, -v3, v5
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v2, -v8, v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v3, -v9, v7
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v0
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v2
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v3
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v8, v0, -v4
+; GFX10-DENORM-NEXT:    v_fma_f32 v1, -v9, v1, -v5
+; GFX10-DENORM-NEXT:    v_fma_f32 v2, -v10, v2, -v6
+; GFX10-DENORM-NEXT:    v_fma_f32 v3, -v11, v3, -v7
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %x, %y
+  %b = fpext <4 x half> %a to <4 x float>
+  %c = fneg <4 x float> %b
+  %d = fsub fast <4 x float> %c, %z
+  ret <4 x float> %d
+}
+
+
+; fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_ext_neg_mul2(<4 x float> %x, <4 x half> %y, <4 x half> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v4, v4, v6
+; GFX9-DENORM-NEXT:    s_mov_b32 s0, 0x80008000
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v5, v5, v7
+; GFX9-DENORM-NEXT:    v_xor_b32_e32 v4, s0, v4
+; GFX9-DENORM-NEXT:    v_xor_b32_e32 v5, s0, v5
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v6, v4
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v7, v5
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v0, v0, v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v2, v2, v7
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v1, v1, v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v3, v3, v5
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v4
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v6
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v9, v5
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v11, v7
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v0, v8, v10
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v2, v9, v11
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v1, v4, v6
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v3, v5, v7
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %y, %z
+  %b = fneg <4 x half> %a
+  %c = fpext <4 x half> %b to <4 x float>
+  %d = fsub fast <4 x float> %x, %c
+  ret <4 x float> %d
+}
+
+; fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_neg_ext_mul2(<4 x float> %x, <4 x half> %y, <4 x half> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v4, v4, v6
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v5, v5, v7
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v6, v4
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v7, v5
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v0, v0, -v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v2, v2, -v7
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v1, v1, -v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e64 v3, v3, -v5
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v4
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v6
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v9, v5
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v11, v7
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v0, v8, v10
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v2, v9, v11
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v1, v4, v6
+; GFX10-DENORM-NEXT:    v_fmac_f32_e32 v3, v5, v7
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %y, %z
+  %b = fpext <4 x half> %a to <4 x float>
+  %c = fneg <4 x float> %b
+  %d = fsub fast <4 x float> %x, %c
+  ret <4 x float> %d
+}