Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -365,6 +365,18 @@
       std::tuple<Register, Register, Register, Register, Register, unsigned>
           &MatchInfo);
 
+  /// Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
+  ///         -> (fma x, y, (fma (fpext u), (fpext v), z))
+  ///           (fadd (fmad x, y, (fpext (fmul u, v))), z)
+  ///         -> (fmad x, y, (fmad (fpext u), (fpext v), z))
+  bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
+      MachineInstr &MI,
+      std::tuple<Register, Register, Register, Register, Register, unsigned>
+          &MatchInfo);
+  bool applyCombineFAddFpExtFMulToFMadOrFMAAggressive(
+      MachineInstr &MI,
+      std::tuple<Register, Register, Register, Register, Register, unsigned>
+          &MatchInfo);
+
   /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
   bool matchCombineTruncOfExt(MachineInstr &MI,
                               std::pair<Register, unsigned> &MatchInfo);
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -608,6 +608,17 @@
          ${info}); }]),
   (apply [{ return Helper.applyCombineFAddFMAFMulToFMadOrFMA(*${root},
          ${info}); }])>;
 
+// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
+//           (fma x, y, (fma (fpext u), (fpext v), z))
+def combine_fadd_fpext_fma_fmul_to_fmad_or_fma_info : GIDefMatchData<
+    "std::tuple<Register, Register, Register, Register, Register, unsigned>">;
+def combine_fadd_fpext_fma_fmul_to_fmad_or_fma : GICombineRule<
+  (defs root:$root, combine_fadd_fpext_fma_fmul_to_fmad_or_fma_info:$info),
+  (match (wip_match_opcode G_FADD):$root,
+         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
+              *${root}, ${info}); }]),
+  (apply [{ return Helper.applyCombineFAddFpExtFMulToFMadOrFMAAggressive(
+              *${root}, ${info}); }])>;
 
 // Currently only the one combine above.
 def insert_vec_elt_combines : GICombineGroup<
@@ -656,4 +667,4 @@
     const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
     shift_immed_chain, shift_of_shifted_logic_chain,
     combine_fadd_fmul_to_fmad_or_fma, combine_fadd_fpext_fmul_to_fmad_or_fma,
-    combine_fadd_fma_fmul_to_fmad_or_fma]>;
+    combine_fadd_fma_fmul_to_fmad_or_fma, combine_fadd_fpext_fma_fmul_to_fmad_or_fma]>;
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3990,6 +3990,182 @@
   return true;
 }
 
+bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
+    MachineInstr &MI,
+    std::tuple<Register, Register, Register, Register, Register, unsigned>
+        &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_FADD);
+
+  auto *MF = MI.getParent()->getParent();
+  const auto &TLI = *MF->getSubtarget().getTargetLowering();
+  const TargetOptions &Options = MF->getTarget().Options;
+  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
+  LLT SrcType = MRI.getType(MI.getOperand(1).getReg());
+  MachineInstr *MI0 = MRI.getVRegDef(MI.getOperand(1).getReg());
+  MachineInstr *MI1 = MRI.getVRegDef(MI.getOperand(2).getReg());
+
+  bool LegalOperations = isLegal({TargetOpcode::G_FADD, {DstType, SrcType}});
+  // Floating-point multiply-add with intermediate rounding.
+  bool HasFMAD = (LegalOperations && TLI.isFMADLegal(MI, DstType));
+  // Floating-point multiply-add without intermediate rounding.
+  bool HasFMA =
+      TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
+      (!LegalOperations || isLegal({TargetOpcode::G_FMA, {DstType, SrcType}}));
+
+  // No valid opcode, do not combine.
+  if (!HasFMAD && !HasFMA)
+    return false;
+
+  bool CanFuse =
+      Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmContract);
+  bool AllowFusionGlobally =
+      (Options.AllowFPOpFusion == FPOpFusion::Fast || CanFuse || HasFMAD);
+
+  // If the addition is not contractable, do not combine.
+  if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
+    return false;
+
+  unsigned PreferredFusedOpcode =
+      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
+  bool Aggressive = TLI.enableAggressiveFMAFusion(DstType);
+
+  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
+  // prefer to fold the multiply with fewer uses.
+  if (Aggressive && isContractableFMul(*MI0, AllowFusionGlobally) &&
+      isContractableFMul(*MI1, AllowFusionGlobally)) {
+    if (std::distance(MRI.use_instr_nodbg_begin(MI0->getOperand(0).getReg()),
+                      MRI.use_instr_nodbg_end()) >
+        std::distance(MRI.use_instr_nodbg_begin(MI1->getOperand(0).getReg()),
+                      MRI.use_instr_nodbg_end()))
+      std::swap(MI0, MI1);
+  }
+
+  // More folding opportunities when the target permits.
+  if (Aggressive) {
+    // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
+    //   -> (fma x, y, (fma (fpext u), (fpext v), z))
+    if (MI0->getOpcode() == PreferredFusedOpcode) {
+      MachineInstr *MI02 = MRI.getVRegDef(MI0->getOperand(3).getReg());
+      if (MI02->getOpcode() == TargetOpcode::G_FPEXT) {
+        MachineInstr *MI020 = MRI.getVRegDef(MI02->getOperand(1).getReg());
+        if (isContractableFMul(*MI020, AllowFusionGlobally) &&
+            TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                                MRI.getType(MI020->getOperand(0).getReg()))) {
+          MatchInfo = {MI020->getOperand(1).getReg(),
+                       MI020->getOperand(2).getReg(),
+                       MI1->getOperand(0).getReg(),
+                       MI0->getOperand(1).getReg(),
+                       MI0->getOperand(2).getReg(),
+                       PreferredFusedOpcode};
+          return true;
+        }
+      }
+    }
+
+    // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
+    //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
+    // FIXME: This turns two single-precision and one double-precision
+    // operation into two double-precision operations, which might not be
+    // interesting for all targets, especially GPUs.
+    if (MI0->getOpcode() == TargetOpcode::G_FPEXT) {
+      MachineInstr *MI00 = MRI.getVRegDef(MI0->getOperand(1).getReg());
+      if (MI00->getOpcode() == PreferredFusedOpcode) {
+        MachineInstr *MI002 = MRI.getVRegDef(MI00->getOperand(3).getReg());
+        if (isContractableFMul(*MI002, AllowFusionGlobally) &&
+            TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                                MRI.getType(MI00->getOperand(0).getReg()))) {
+          MatchInfo = {MI002->getOperand(1).getReg(),
+                       MI002->getOperand(2).getReg(),
+                       MI1->getOperand(0).getReg(),
+                       MI00->getOperand(1).getReg(),
+                       MI00->getOperand(2).getReg(),
+                       PreferredFusedOpcode};
+          return true;
+        }
+      }
+    }
+
+    // fold (fadd x, (fma y, z, (fpext (fmul u, v))))
+    //   -> (fma y, z, (fma (fpext u), (fpext v), x))
+    if (MI1->getOpcode() == PreferredFusedOpcode) {
+      MachineInstr *MI12 = MRI.getVRegDef(MI1->getOperand(3).getReg());
+      if (MI12->getOpcode() == TargetOpcode::G_FPEXT) {
+        MachineInstr *MI120 = MRI.getVRegDef(MI12->getOperand(1).getReg());
+        if (isContractableFMul(*MI120, AllowFusionGlobally) &&
+            TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                                MRI.getType(MI120->getOperand(0).getReg()))) {
+          MatchInfo = {MI120->getOperand(1).getReg(),
+                       MI120->getOperand(2).getReg(),
+                       MI0->getOperand(0).getReg(),
+                       MI1->getOperand(1).getReg(),
+                       MI1->getOperand(2).getReg(),
+                       PreferredFusedOpcode};
+          return true;
+        }
+      }
+    }
+
+    // fold (fadd x, (fpext (fma y, z, (fmul u, v))))
+    //   -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
+    // FIXME: This turns two single-precision and one double-precision
+    // operation into two double-precision operations, which might not be
+    // interesting for all targets, especially GPUs.
+    if (MI1->getOpcode() == TargetOpcode::G_FPEXT) {
+      MachineInstr *MI10 = MRI.getVRegDef(MI1->getOperand(1).getReg());
+      if (MI10->getOpcode() == PreferredFusedOpcode) {
+        MachineInstr *MI102 = MRI.getVRegDef(MI10->getOperand(3).getReg());
+        if (isContractableFMul(*MI102, AllowFusionGlobally) &&
+            TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+                                MRI.getType(MI10->getOperand(0).getReg()))) {
+          MatchInfo = {MI102->getOperand(1).getReg(),
+                       MI102->getOperand(2).getReg(),
+                       MI0->getOperand(0).getReg(),
+                       MI10->getOperand(1).getReg(),
+                       MI10->getOperand(2).getReg(),
+                       PreferredFusedOpcode};
+          return true;
+        }
+      }
+    }
+  }
+
+  return false;
+}
+
+bool CombinerHelper::applyCombineFAddFpExtFMulToFMadOrFMAAggressive(
+    MachineInstr &MI,
+    std::tuple<Register, Register, Register, Register, Register, unsigned>
+        &MatchInfo) {
+  Register Src1, Src2, Src3, Src4, Src5;
+  unsigned PreferredFusedOpcode;
+  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
+  Builder.setInstrAndDebugLoc(MI);
+  // MatchInfo holds (u, v, z, x, y, opcode) for rebuilding the expression as
+  // (opcode x, y, (opcode (fpext u), (fpext v), z)).
+  std::tie(Src1, Src2, Src3, Src4, Src5, PreferredFusedOpcode) = MatchInfo;
+
+  // Inner fused op: (fpext u) * (fpext v) + z, computed in the wide type.
+  Register TmpReg =
+      Builder.buildInstr(PreferredFusedOpcode, {DstType},
+                         {Builder.buildFPExt(DstType, Src1).getReg(0),
+                          Builder.buildFPExt(DstType, Src2).getReg(0), Src3})
+          .getReg(0);
+
+  // For the (fpext (fma ...)) patterns the outer multiplicands x and y are
+  // still in the narrow type and must be extended as well.
+  Register X = Src4;
+  Register Y = Src5;
+  LLT Ty1 = MRI.getType(X), Ty2 = MRI.getType(Y);
+  if (Ty1 != DstType && Ty2 != DstType) {
+    X = Builder.buildFPExt(DstType, Src4).getReg(0);
+    Y = Builder.buildFPExt(DstType, Src5).getReg(0);
+  }
+
+  // Outer fused op: x * y + inner, replacing the original G_FADD.
+  Builder.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+                     {X, Y, TmpReg});
+  MI.eraseFromParent();
+
+  return true;
+}
+
 bool CombinerHelper::tryCombine(MachineInstr &MI) {
   if (tryCombineCopy(MI))
     return true;
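For orientation before the tests: a minimal IR-level sketch of the primary fold, written with llvm.fmuladd for readability (the combine itself rewrites G_FMA/G_FMAD generic MIR; function and value names here are illustrative, and the contract flag stands in for -fp-contract=fast). The first function is the shape the tests below feed in; the second is the shape the combine produces:

define float @fold_input(float %x, float %y, float %z, half %u, half %v) {
  %a = fmul half %u, %v                          ; narrow product
  %b = fpext half %a to float                    ; extended into the addend
  %c = call float @llvm.fmuladd.f32(float %x, float %y, float %b)
  %d = fadd contract float %c, %z
  ret float %d
}

define float @fold_output(float %x, float %y, float %z, half %u, half %v) {
  %ue = fpext half %u to float                   ; extensions hoisted onto the
  %ve = fpext half %v to float                   ; fmul operands
  %inner = call float @llvm.fmuladd.f32(float %ue, float %ve, float %z)
  %outer = call float @llvm.fmuladd.f32(float %x, float %y, float %inner)
  ret float %outer
}

declare float @llvm.fmuladd.f32(float, float, float)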
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
@@ -0,0 +1,503 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX9-DENORM %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -fp-contract=fast < %s | FileCheck -check-prefix=GFX10-CONTRACT %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX10-DENORM %s
+
+; fold (fadd (fma x, y, (fpext (fmul u, v))), z) -> (fma x, y, (fma (fpext u), (fpext v), z))
+define amdgpu_vs float @test_f16_f32_add_fma_ext_mul(float %x, float %y, float %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX9-DENORM-NEXT: v_mad_f32 v2, v3, v4, v2
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v2, v0, v1
+; GFX9-DENORM-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-NEXT: v_fmac_f32_e32 v3, v0, v1
+; GFX10-NEXT: v_add_f32_e32 v0, v3, v2
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v3, v0, v1
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v3, v2
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v3, v0, v1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v3, v2
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul half %u, %v
+  %b = fpext half %a to float
+  %c = call float @llvm.fmuladd.f32(float %x, float %y, float %b)
+  %d = fadd float %c, %z
+  ret float %d
+}
+
+; TODO: REMOVE ME: Selection DAG does not combine this example
+; fold (fadd (fpext (fma x, y, (fmul u, v))), z) -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
+define amdgpu_vs float @test_f16_f32_add_ext_fma_mul(half %x, half %y, float %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v5, v0
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT: v_mad_f32 v0, v3, v4, v2
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v5, v1
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_fmac_f16_e32 v3, v0, v1
+; GFX10-NEXT: v_cvt_f32_f16_e32 v0, v3
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_fmac_f16_e32 v3, v0, v1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v0, v3
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX10-DENORM-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul half %u, %v
+  %b = call half @llvm.fmuladd.f16(half %x, half %y, half %a)
+  %c = fpext half %b to float
+  %d = fadd float %c, %z
+  ret float %d
+}
+
+; fold (fadd x, (fma y, z, (fpext (fmul u, v)))) -> (fma y, z, (fma (fpext u), (fpext v), x))
+define amdgpu_vs float @test_f16_f32_add_fma_ext_mul_rhs(float %x, float %y, float %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v3, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v1, v2
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul half %u, %v
+  %b = fpext half %a to float
+  %c = call float @llvm.fmuladd.f32(float %y, float %z, float %b)
+  %d = fadd float %x, %c
+  ret float %d
+}
+
+; TODO: REMOVE ME: Selection DAG does not combine this example
+; fold (fadd x, (fpext (fma y, z, (fmul u, v)))) -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
+define amdgpu_vs float @test_f16_f32_add_ext_fma_mul_rhs(float %x, half %y, half %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v3, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v1, v2
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_fmac_f16_e32 v3, v1, v2
+; GFX10-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_fmac_f16_e32 v3, v1, v2
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v1, v1, v2
+; GFX10-DENORM-NEXT: v_add_f16_e32 v1, v1, v3
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul half %u, %v
+  %b = call half @llvm.fmuladd.f16(half %y, half %z, half %a)
+  %c = fpext half %b to float
+  %d = fadd float %x, %c
+  ret float %d
+}
+
+; fold (fadd (fma x, y, (fpext (fmul u, v))), z) -> (fma x, y, (fma (fpext u), (fpext v), z))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_fma_ext_mul(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v14, v0, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v15, v2, v6
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v12, v1, v5
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v13, v3, v7
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_fmac_f32_e32 v14, v0, v4
+; GFX10-NEXT: v_fmac_f32_e32 v15, v2, v6
+; GFX10-NEXT: v_fmac_f32_e32 v12, v1, v5
+; GFX10-NEXT: v_fmac_f32_e32 v13, v3, v7
+; GFX10-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX10-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX10-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX10-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v14, v0, v4
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v15, v2, v6
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v12, v1, v5
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v13, v3, v7
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v14, v0, v4
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v15, v2, v6
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v12, v1, v5
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v13, v3, v7
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul <4 x half> %u, %v
+  %b = fpext <4 x half> %a to <4 x float>
+  %c = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %b)
+  %d = fadd <4 x float> %c, %z
+  ret <4 x float> %d
+}
+
+; TODO: REMOVE ME: Selection DAG does not combine this example
+; fold (fadd (fpext (fma x, y, (fmul u, v))), z) -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_ext_fma_mul(<4 x half> %x, <4 x half> %y, <4 x float> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v0, v0, v2
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v1, v1, v3
+; GFX9-DENORM-NEXT: v_pk_add_f16 v0, v0, v8
+; GFX9-DENORM-NEXT: v_pk_add_f16 v1, v1, v9
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v15, v8, v10
+; GFX10-NEXT: v_pk_mul_f16 v10, v9, v11
+; GFX10-NEXT: v_pk_fma_f16 v0, v0, v2, v15
+; GFX10-NEXT: v_pk_fma_f16 v1, v1, v3, v10
+; GFX10-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX10-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX10-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX10-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v15, v8, v10
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v10, v9, v11
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v0, v0, v2, v15
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v1, v1, v3, v10
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v15, v0, v2
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v2, v9, v11
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v1, v1, v3
+; GFX10-DENORM-NEXT: v_pk_add_f16 v0, v15, v8
+; GFX10-DENORM-NEXT: v_pk_add_f16 v1, v1, v2
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul <4 x half> %u, %v
+  %b = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> %x, <4 x half> %y, <4 x half> %a)
+  %c = fpext <4 x half> %b to <4 x float>
+  %d = fadd <4 x float> %c, %z
+  ret <4 x float> %d
+}
+
+; fold (fadd x, (fma y, z, (fpext (fmul u, v)))) -> (fma y, z, (fma (fpext u), (fpext v), x))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_fma_ext_mul_rhs(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v14, v4, v8
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v15, v6, v10
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v12, v5, v9
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v13, v7, v11
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_fmac_f32_e32 v14, v4, v8
+; GFX10-NEXT: v_fmac_f32_e32 v15, v6, v10
+; GFX10-NEXT: v_fmac_f32_e32 v12, v5, v9
+; GFX10-NEXT: v_fmac_f32_e32 v13, v7, v11
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX10-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX10-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v14, v4, v8
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v15, v6, v10
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v12, v5, v9
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v13, v7, v11
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v14, v4, v8
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v15, v6, v10
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v12, v5, v9
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v13, v7, v11
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul <4 x half> %u, %v
+  %b = fpext <4 x half> %a to <4 x float>
+  %c = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %y, <4 x float> %z, <4 x float> %b)
+  %d = fadd <4 x float> %x, %c
+  ret <4 x float> %d
+}
+
+; TODO: REMOVE ME: Selection DAG does not combine this example
+; fold (fadd x, (fpext (fma y, z, (fmul u, v)))) -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_ext_fma_mul_rhs(<4 x float> %x, <4 x half> %y, <4 x half> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v4, v4, v6
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v5, v5, v7
+; GFX9-DENORM-NEXT: v_pk_add_f16 v4, v4, v8
+; GFX9-DENORM-NEXT: v_pk_add_f16 v5, v5, v9
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v15, v8, v10
+; GFX10-NEXT: v_pk_mul_f16 v10, v9, v11
+; GFX10-NEXT: v_pk_fma_f16 v4, v4, v6, v15
+; GFX10-NEXT: v_pk_fma_f16 v5, v5, v7, v10
+; GFX10-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX10-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX10-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v15, v8, v10
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v10, v9, v11
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v4, v4, v6, v15
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v5, v5, v7, v10
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v15, v4, v6
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v6, v9, v11
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v5, v5, v7
+; GFX10-DENORM-NEXT: v_pk_add_f16 v4, v15, v8
+; GFX10-DENORM-NEXT: v_pk_add_f16 v5, v5, v6
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+  %a = fmul <4 x half> %u, %v
+  %b = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> %y, <4 x half> %z, <4 x half> %a)
+  %c = fpext <4 x half> %b to <4 x float>
+  %d = fadd <4 x float> %x, %c
+  ret <4 x float> %d
+}
+
+declare float @llvm.fmuladd.f32(float, float, float) #0
+declare half @llvm.fmuladd.f16(half, half, half) #0
+declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) #0
+declare <4 x half> @llvm.fmuladd.v4f16(<4 x half>, <4 x half>, <4 x half>) #0
+
+attributes #0 = { nounwind readnone }
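The CHECK lines above were generated by the script named in the NOTE at the top of the test file; if the combine's output changes, they can be refreshed with an invocation along these lines (assuming a build tree at ./build with llc built):

  llvm/utils/update_llc_test_checks.py --llc-binary ./build/bin/llc llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll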