diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2588,10 +2588,6 @@
       {ISD::ADD, MVT::v8i16, 2},
       {ISD::ADD, MVT::v4i32, 2},
       {ISD::ADD, MVT::v2i64, 2},
-      {ISD::FADD, MVT::v4f16, 2},
-      {ISD::FADD, MVT::v8f16, 2},
-      {ISD::FADD, MVT::v4f32, 2},
-      {ISD::FADD, MVT::v2f64, 2},
       {ISD::OR, MVT::v8i8, 15},
       {ISD::OR, MVT::v16i8, 17},
       {ISD::OR, MVT::v4i16, 7},
@@ -2618,6 +2614,30 @@
   default:
     break;
   case ISD::FADD:
+  {
+    Type *EltType = ValTy->getElementType();
+    if (!EltType->isDoubleTy() && !EltType->isFloatTy() && !EltType->isHalfTy())
+      break;
+
+    unsigned NumVecElts = cast<FixedVectorType>(ValTy)->getNumElements();
+    unsigned RoundedNumVecElts = PowerOf2Ceil(NumVecElts);
+    unsigned BitsPerElement = EltType->getScalarSizeInBits();
+    unsigned ElementsPerRegister = 128 / BitsPerElement;
+
+    // While more elements remain than fit into a single 128-bit register,
+    // halve the vector with element-wise vector FADDs, one per result register.
+    InstructionCost VectorFADD = 0;
+    unsigned ElementsLeft = RoundedNumVecElts;
+    while (ElementsLeft > ElementsPerRegister) {
+      VectorFADD += ElementsLeft / (ElementsPerRegister * 2);
+      ElementsLeft /= 2;
+    }
+
+    // The final in-register reduction is a chain of log2(N) FADDPs.
+    InstructionCost FADDPCosts(Log2_32_Ceil(ElementsLeft));
+
+    return FADDPCosts + VectorFADD;
+  }
   case ISD::ADD:
     if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
       return (LT.first - 1) + Entry->Cost;
diff --git a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
--- a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
@@ -3,20 +3,14 @@
 define void @strict_fp_reductions() {
 ; CHECK-LABEL: 'strict_fp_reductions'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 45 for instruction: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %fadd_v4f32 = call float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %fadd_v8f32 = call float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %fadd_v2f64 = call double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %fadd_v4f64 = call double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
 ;
-  %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0.0, <4 x half> undef)
-  %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0.0, <8 x half> undef)
-  %fadd_v4f32 = call float @llvm.vector.reduce.fadd.v4f32(float 0.0, <4 x float> undef)
-  %fadd_v8f32 = call float @llvm.vector.reduce.fadd.v8f32(float 0.0, <8 x float> undef)
-  %fadd_v2f64 = call double @llvm.vector.reduce.fadd.v2f64(double 0.0, <2 x double> undef)
-  %fadd_v4f64 = call double @llvm.vector.reduce.fadd.v4f64(double 0.0, <4 x double> undef)
+  ; %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0.0, <4 x half> undef)
+  ; %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0.0, <8 x half> undef)
+  ; %fadd_v4f32 = call float @llvm.vector.reduce.fadd.v4f32(float 0.0, <4 x float> undef)
+  ; %fadd_v8f32 = call float @llvm.vector.reduce.fadd.v8f32(float 0.0, <8 x float> undef)
+  ; %fadd_v2f64 = call double @llvm.vector.reduce.fadd.v2f64(double 0.0, <2 x double> undef)
+  ; %fadd_v4f64 = call double @llvm.vector.reduce.fadd.v4f64(double 0.0, <4 x double> undef)
   ret void
 }
 
@@ -24,24 +18,24 @@
 define void @fast_fp_reductions() {
 ; CHECK-LABEL: 'fast_fp_reductions'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 50 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
 ;
   %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0.0, <4 x half> undef)
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/dot_product_test.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/dot_product_test.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/dot_product_test.ll
@@ -0,0 +1,199 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; REQUIRES: aarch64-registered-target
+; RUN: opt -passes='lower-matrix-intrinsics' -mtriple=arm64-apple-iphoneos -S < %s | FileCheck %s
+
+define <1 x float> @dotproduct_float_v6(<6 x float> %a, <6 x float> %b) {
+; CHECK-LABEL: @dotproduct_float_v6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fmul <6 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v6f32(float 0.000000e+00, <6 x float> [[TMP0]])
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <1 x float> poison, float [[TMP1]], i64 0
+; CHECK-NEXT: ret <1 x float> [[TMP2]]
+;
+entry:
+  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v6f32.v6f32(<6 x float> %a, <6 x float> %b, i32 1, i32 6, i32 1)
+  ret <1 x float> %c
+}
+
+declare <1 x float> @llvm.matrix.multiply.v1f32.v6f32.v6f32(<6 x float>, <6 x float>, i32, i32, i32)
+
+define <1 x float> @dotproduct_float_v1(<1 x float> %a, <1 x float> %b) {
+; CHECK-LABEL: @dotproduct_float_v1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SPLIT:%.*]] = shufflevector <1 x float> [[A:%.*]], <1 x float> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[SPLIT1:%.*]] = shufflevector <1 x float> [[B:%.*]], <1 x float> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <1 x float> [[SPLIT]], <1 x float> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x float> [[SPLIT1]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT]], <1 x float> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <1 x float> [[BLOCK]], [[SPLAT_SPLAT]]
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <1 x float> [[TMP1]], <1 x float> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <1 x float> undef, <1 x float> [[TMP2]], <1 x i32>
+; CHECK-NEXT: ret <1 x float> [[TMP3]]
+;
+entry:
+  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v1f32.v1f32(<1 x float> %a, <1 x float> %b, i32 1, i32 1, i32 1)
+  ret <1 x float> %c
+}
+
+declare <1 x float> @llvm.matrix.multiply.v1f32.v1f32.v1f32(<1 x float>, <1 x float>, i32, i32, i32)
+
+define <1 x float> @dotproduct_float_v3(<3 x float> %a, <3 x float> %b) {
+; CHECK-LABEL: @dotproduct_float_v3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fmul <3 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v3f32(float 0.000000e+00, <3 x float> [[TMP0]])
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <1 x float> poison, float [[TMP1]], i64 0
+; CHECK-NEXT: ret <1 x float> [[TMP2]]
+;
+entry:
+  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v3f32.v3f32(<3 x float> %a, <3 x float> %b, i32 1, i32 3, i32 1)
+  ret <1 x float> %c
+}
+
+declare <1 x float> @llvm.matrix.multiply.v1f32.v3f32.v3f32(<3 x float>, <3 x float>, i32, i32, i32)
+
+define <1 x float> @intrinsic_column_major_load_dot_product_float_v6(ptr %lhs_address, ptr %rhs_address) {
+; CHECK-LABEL: @intrinsic_column_major_load_dot_product_float_v6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <6 x float>, ptr [[LHS_ADDRESS:%.*]], align 32
+; CHECK-NEXT: [[TMP1:%.*]] = load <6 x float>, ptr [[RHS_ADDRESS:%.*]], align 32
+; CHECK-NEXT: [[TMP2:%.*]] = fmul <6 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fadd.v6f32(float 0.000000e+00, <6 x float> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <1 x float> poison, float [[TMP3]], i64 0
+; CHECK-NEXT: ret <1 x float> [[TMP4]]
+;
+entry:
+  %lhs = tail call fast <6 x float> @llvm.matrix.column.major.load.v6f32.i64(ptr nonnull align 4 %lhs_address, i64 6, i1 false, i32 6, i32 1)
+  %rhs = tail call fast <6 x float> @llvm.matrix.column.major.load.v6f32.i64(ptr nonnull align 4 %rhs_address, i64 6, i1 false, i32 1, i32 6)
+  %result = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v6f32.v6f32(<6 x float> %lhs, <6 x float> %rhs, i32 1, i32 6, i32 1)
+  ret <1 x float> %result
+}
+
+declare <6 x float> @llvm.matrix.column.major.load.v6f32.i64(ptr nonnull align 4, i64, i1, i32, i32)
+
+define <1 x float> @LoadInst_dot_product_float_v7(ptr %lhs_address, ptr %rhs_address) {
+; CHECK-LABEL: @LoadInst_dot_product_float_v7(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LHS:%.*]] = load <7 x float>, ptr [[LHS_ADDRESS:%.*]], align 32
+; CHECK-NEXT: [[RHS:%.*]] = load <7 x float>, ptr [[RHS_ADDRESS:%.*]], align 32
+; CHECK-NEXT: [[TMP0:%.*]] = fmul <7 x float> [[LHS]], [[RHS]]
+; CHECK-NEXT: [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v7f32(float 0.000000e+00, <7 x float> [[TMP0]])
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <1 x float> poison, float [[TMP1]], i64 0
+; CHECK-NEXT: ret <1 x float> [[TMP2]]
+;
+entry:
+  %lhs = load <7 x float>, ptr %lhs_address
+  %rhs = load <7 x float>, ptr %rhs_address
+  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v7f32.v7f32(<7 x float> %lhs, <7 x float> %rhs, i32 1, i32 7, i32 1)
+  ret <1 x float> %c
+}
+
+declare <1 x float> @llvm.matrix.multiply.v1f32.v7f32.v7f32(<7 x float>, <7 x float>, i32, i32, i32)
+
+define <1 x double> @dotproduct_double_v6(<6 x double> %a, <6 x double> %b) {
+; CHECK-LABEL: @dotproduct_double_v6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fmul <6 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.vector.reduce.fadd.v6f64(double 0.000000e+00, <6 x double> [[TMP0]])
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <1 x double> poison, double [[TMP1]], i64 0
+; CHECK-NEXT: ret <1 x double> [[TMP2]]
+;
+entry:
+  %c = tail call fast <1 x double> @llvm.matrix.multiply.v1f64.v6f64.v6f64(<6 x double> %a, <6 x double> %b, i32 1, i32 6, i32 1)
+  ret <1 x double> %c
+}
+
+declare <1 x double> @llvm.matrix.multiply.v1f64.v6f64.v6f64(<6 x double>, <6 x double>, i32, i32, i32)
+
+define <1 x double> @intrinsic_column_major_load_dot_product_double_v6(ptr %lhs_address, ptr %rhs_address) {
+; CHECK-LABEL: @intrinsic_column_major_load_dot_product_double_v6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <6 x double>, ptr [[LHS_ADDRESS:%.*]], align 64
+; CHECK-NEXT: [[TMP1:%.*]] = load <6 x double>, ptr [[RHS_ADDRESS:%.*]], align 64
+; CHECK-NEXT: [[TMP2:%.*]] = fmul <6 x double> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fadd.v6f64(double 0.000000e+00, <6 x double> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <1 x double> poison, double [[TMP3]], i64 0
+; CHECK-NEXT: ret <1 x double> [[TMP4]]
+;
+entry:
+  %lhs = tail call fast <6 x double> @llvm.matrix.column.major.load.v6f64.i64(ptr nonnull align 4 %lhs_address, i64 6, i1 false, i32 6, i32 1)
+  %rhs = tail call fast <6 x double> @llvm.matrix.column.major.load.v6f64.i64(ptr nonnull align 4 %rhs_address, i64 6, i1 false, i32 1, i32 6)
+  %result = tail call fast <1 x double> @llvm.matrix.multiply.v1f64.v6f64.v6f64(<6 x double> %lhs, <6 x double> %rhs, i32 1, i32 6, i32 1)
+  ret <1 x double> %result
+}
+
+declare <6 x double> @llvm.matrix.column.major.load.v6f64.i64(ptr nonnull align 4, i64, i1, i32, i32)
+
+define <1 x double> @LoadInst_dot_product_double_v7(ptr %lhs_address, ptr %rhs_address) {
+; CHECK-LABEL: @LoadInst_dot_product_double_v7(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LHS:%.*]] = load <7 x double>, ptr [[LHS_ADDRESS:%.*]], align 64
+; CHECK-NEXT: [[RHS:%.*]] = load <7 x double>, ptr [[RHS_ADDRESS:%.*]], align 64
+; CHECK-NEXT: [[TMP0:%.*]] = fmul <7 x double> [[LHS]], [[RHS]]
+; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> [[TMP0]])
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <1 x double> poison, double [[TMP1]], i64 0
+; CHECK-NEXT: ret <1 x double> [[TMP2]]
+;
+entry:
+  %lhs = load <7 x double>, ptr %lhs_address
+  %rhs = load <7 x double>, ptr %rhs_address
+  %c = tail call fast <1 x double> @llvm.matrix.multiply.v1f64.v7f64.v7f64(<7 x double> %lhs, <7 x double> %rhs, i32 1, i32 7, i32 1)
+  ret <1 x double> %c
+}
+
+declare <1 x double> @llvm.matrix.multiply.v1f64.v7f64.v7f64(<7 x double>, <7 x double>, i32, i32, i32)
+
+define <1 x i64> @dotproduct_i64_v8(<8 x i64> %a, <8 x i64> %b) {
+; CHECK-LABEL: @dotproduct_i64_v8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = mul <8 x i64> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP0]])
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <1 x i64> poison, i64 [[TMP1]], i64 0
+; CHECK-NEXT: ret <1 x i64> [[TMP2]]
+;
+entry:
+  %c = tail call <1 x i64> @llvm.matrix.multiply.v1i64.v8i64.v8i64(<8 x i64> %a, <8 x i64> %b, i32 1, i32 8, i32 1)
+  ret <1 x i64> %c
+}
+
+declare <1 x i64> @llvm.matrix.multiply.v1i64.v8i64.v8i64(<8 x i64>, <8 x i64>, i32, i32, i32)
+
+define <1 x i32> @intrinsic_column_major_load_dot_product_i32_v8(ptr %lhs_address, ptr %rhs_address) {
+; CHECK-LABEL: @intrinsic_column_major_load_dot_product_i32_v8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i32>, ptr [[LHS_ADDRESS:%.*]], align 32
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr [[RHS_ADDRESS:%.*]], align 32
+; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <1 x i32> poison, i32 [[TMP3]], i64 0
+; CHECK-NEXT: ret <1 x i32> [[TMP4]]
+;
+entry:
+  %lhs = tail call <8 x i32> @llvm.matrix.column.major.load.v8i32.i64(ptr nonnull align 4 %lhs_address, i64 8, i1 false, i32 8, i32 1)
+  %rhs = tail call <8 x i32> @llvm.matrix.column.major.load.v8i32.i64(ptr nonnull align 4 %rhs_address, i64 8, i1 false, i32 1, i32 8)
+  %result = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %lhs, <8 x i32> %rhs, i32 1, i32 8, i32 1)
+  ret <1 x i32> %result
+}
+
+declare <8 x i32> @llvm.matrix.column.major.load.v8i32.i64(ptr nonnull align 4, i64, i1, i32, i32)
+declare <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32>, <8 x i32>, i32, i32, i32)
+
+define <1 x i16> @LoadInst_dot_product_i16_v6(ptr %lhs_address, ptr %rhs_address) {
+; CHECK-LABEL: @LoadInst_dot_product_i16_v6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LHS:%.*]] = load <6 x i16>, ptr [[LHS_ADDRESS:%.*]], align 16
+; CHECK-NEXT: [[RHS:%.*]] = load <6 x i16>, ptr [[RHS_ADDRESS:%.*]], align 16
+; CHECK-NEXT: [[TMP0:%.*]] = mul <6 x i16> [[LHS]], [[RHS]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.vector.reduce.add.v6i16(<6 x i16> [[TMP0]])
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <1 x i16> poison, i16 [[TMP1]], i64 0
+; CHECK-NEXT: ret <1 x i16> [[TMP2]]
+;
+entry:
+  %lhs = load <6 x i16>, ptr %lhs_address
+  %rhs = load <6 x i16>, ptr %rhs_address
+  %result = tail call <1 x i16> @llvm.matrix.multiply.v1i16.v6i16.v6i16(<6 x i16> %lhs, <6 x i16> %rhs, i32 1, i32 6, i32 1)
+  ret <1 x i16> %result
+}
+
+declare <1 x i16> @llvm.matrix.multiply.v1i16.v6i16.v6i16(<6 x i16>, <6 x i16>, i32, i32, i32)
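
As a sanity check on the new fadd-reduction costs, the recurrence added in
AArch64TargetTransformInfo.cpp can be evaluated standalone. The sketch below is
illustrative only and not part of the patch: it hardcodes the same 128-bit
vector register width the patch assumes, and log2Ceil is a local stand-in for
llvm::Log2_32_Ceil so the snippet builds without LLVM headers. It reproduces
the costs expected by the updated reduce-fadd.ll, e.g. 3 for v8f32 and 8 for
v9f64.

// Standalone sketch of the fadd-reduction cost recurrence (assumes 128-bit
// NEON registers, matching the "128 / BitsPerElement" in the patch).
#include <cstdio>

// Local stand-in for llvm::Log2_32_Ceil: smallest L with (1 << L) >= X.
static unsigned log2Ceil(unsigned X) {
  unsigned L = 0;
  while ((1u << L) < X)
    ++L;
  return L;
}

static unsigned reductionCost(unsigned NumElts, unsigned EltBits) {
  unsigned Left = 1u << log2Ceil(NumElts); // round up to a power of two
  unsigned PerReg = 128 / EltBits;         // elements per vector register
  unsigned Cost = 0;
  // While more than one register of data remains, halve the vector with
  // element-wise vector FADDs, one FADD per pair of result registers.
  while (Left > PerReg) {
    Cost += Left / (PerReg * 2);
    Left /= 2;
  }
  // The last register is reduced by a chain of log2(Left) FADDPs.
  return Cost + log2Ceil(Left);
}

int main() {
  printf("v8f32:  %u\n", reductionCost(8, 32));  // 1 FADD + 2 FADDPs = 3
  printf("v13f32: %u\n", reductionCost(13, 32)); // rounds to 16 -> 5
  printf("v9f64:  %u\n", reductionCost(9, 64));  // rounds to 16 -> 8
  return 0;
}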