Index: llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
===================================================================
--- llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
+++ llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
@@ -643,6 +643,67 @@
   ret i32 undef
 }
 
+define void @fmuladd() {
+; CHECK-LABEL: 'fmuladd'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half undef, half undef, half undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call float @llvm.fmuladd.f32(float undef, float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call double @llvm.fmuladd.f64(double undef, double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %7 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %10 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %11 = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %12 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %13 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %14 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %15 = call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %27 = call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  call half @llvm.fmuladd.f16(half undef, half undef, half undef)
+  call float @llvm.fmuladd.f32(float undef, float undef, float undef)
+  call double @llvm.fmuladd.f64(double undef, double undef, double undef)
+  call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
+  call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+  call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+  call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+  call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
+  call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
+  call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
+  call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
+  call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
+  call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
+  call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
+  call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
+  call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
+  call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+  call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+  call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+  call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+  call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+  call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+  call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
+  call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+  call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+  call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
+  call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
+  ret void
+}
+
 declare half @llvm.copysign.f16(half, half)
 declare float @llvm.copysign.f32(float, float)
 declare double @llvm.copysign.f64(double, double)
@@ -722,3 +783,31 @@
 declare <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
 declare <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>)
+
+declare half @llvm.fmuladd.f16(half, half, half)
+declare float @llvm.fmuladd.f32(float, float, float)
+declare double @llvm.fmuladd.f64(double, double, double)
+declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>)
+declare <4 x half> @llvm.fmuladd.v4f16(<4 x half>, <4 x half>, <4 x half>)
+declare <8 x half> @llvm.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>)
+declare <16 x half> @llvm.fmuladd.v16f16(<16 x half>, <16 x half>, <16 x half>)
+declare <2 x float> @llvm.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>)
+declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)
+declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
+declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x double> @llvm.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>)
+declare <8 x double> @llvm.fmuladd.v8f64(<8 x double>, <8 x double>, <8 x double>)
+declare <16 x double> @llvm.fmuladd.v16f64(<16 x double>, <16 x double>, <16 x double>)
+declare <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>)
+declare <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>)
+declare <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>)
+declare <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double>, <vscale x 16 x double>, <vscale x 16 x double>)