diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -56,6 +56,16 @@
     cl::desc("Allow the use of FMAs if available and profitable. This may "
              "result in different results, due to less rounding error."));
 
+enum class MatrixLayoutTy { ColumnMajor, RowMajor };
+
+static cl::opt<MatrixLayoutTy> MatrixLayout(
+    "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
+    cl::desc("Sets the default matrix layout"),
+    cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
+                          "Use column-major layout"),
+               clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
+                          "Use row-major layout")));
+
 /// Helper function to either return Scope, if it is a subprogram or the
 /// attached subprogram for a local scope.
 static DISubprogram *getSubprogram(DIScope *Scope) {
@@ -186,20 +196,41 @@
     bool IsColumnMajor = true;
 
   public:
-    MatrixTy() : Vectors() {}
+    MatrixTy()
+        : Vectors(),
+          IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
     MatrixTy(ArrayRef<Value *> Vectors)
-        : Vectors(Vectors.begin(), Vectors.end()) {}
+        : Vectors(Vectors.begin(), Vectors.end()),
+          IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
+    MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy)
+        : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {
+
+      unsigned D = isColumnMajor() ? NumColumns : NumRows;
+      for (unsigned J = 0; J < D; ++J)
+        addVector(UndefValue::get(
+            VectorType::get(EltTy, isColumnMajor() ? NumRows : NumColumns)));
+    }
 
     Value *getVector(unsigned i) const { return Vectors[i]; }
     Value *getColumn(unsigned i) const {
       assert(isColumnMajor() && "only supported for column-major matrixes");
       return Vectors[i];
     }
+    Value *getRow(unsigned i) const {
+      assert(!isColumnMajor() && "only supported for row-major matrixes");
+      return Vectors[i];
+    }
 
     void setColumn(unsigned i, Value *V) { Vectors[i] = V; }
 
     Type *getElementType() { return getVectorTy()->getElementType(); }
 
+    unsigned getNumVectors() const {
+      if (isColumnMajor())
+        return getNumColumns();
+      return getNumRows();
+    }
+
     unsigned getNumColumns() const {
       if (isColumnMajor())
         return Vectors.size();
@@ -222,6 +253,7 @@
 
     void addColumn(Value *V) { Vectors.push_back(V); }
+    void addVector(Value *V) { Vectors.push_back(V); }
 
     VectorType *getColumnTy() {
       assert(isColumnMajor() && "only supported for column-major matrixes");
       return getVectorTy();
@@ -235,6 +267,10 @@
       return make_range(Vectors.begin(), Vectors.end());
     }
 
+    iterator_range<SmallVector<Value *, 8>::iterator> vectors() {
+      return make_range(Vectors.begin(), Vectors.end());
+    }
+
     /// Embed the columns of the matrix into a flat vector by concatenating
     /// them.
     Value *embedInVector(IRBuilder<> &Builder) const {
@@ -266,18 +302,38 @@
     const OpInfoTy &getOpInfo() const { return OpInfo; }
 
     bool isColumnMajor() const { return IsColumnMajor; }
+
+    unsigned getStride() const {
+      if (isColumnMajor())
+        return getNumRows();
+      return getNumColumns();
+    }
+
+    /// Extract a vector of \p NumElts starting at index (\p I, \p J) from
+    /// this matrix.
+    Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
+                         IRBuilder<> &Builder) const {
+      Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
+      Value *Undef = UndefValue::get(Vec->getType());
+      Constant *Mask =
+          createSequentialMask(Builder, isColumnMajor() ? I : J, NumElts, 0);
+      return Builder.CreateShuffleVector(Vec, Undef, Mask, "block");
+    }
   };
 
   struct ShapeInfo {
     unsigned NumRows;
     unsigned NumColumns;
 
+    bool IsColumnMajor;
+
     ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0)
-        : NumRows(NumRows), NumColumns(NumColumns) {}
+        : NumRows(NumRows), NumColumns(NumColumns),
+          IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
 
     ShapeInfo(Value *NumRows, Value *NumColumns)
-        : NumRows(cast<ConstantInt>(NumRows)->getZExtValue()),
-          NumColumns(cast<ConstantInt>(NumColumns)->getZExtValue()) {}
+        : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(),
+                    cast<ConstantInt>(NumColumns)->getZExtValue()) {}
 
     bool operator==(const ShapeInfo &other) {
       return NumRows == other.NumRows && NumColumns == other.NumColumns;
@@ -290,6 +346,18 @@
       assert(NumRows == 0 || NumColumns != 0);
       return NumRows != 0;
     }
+
+    unsigned getStride() const {
+      if (IsColumnMajor)
+        return NumRows;
+      return NumColumns;
+    }
+
+    unsigned getNumVectors() const {
+      if (IsColumnMajor)
+        return NumColumns;
+      return NumRows;
+    }
   };
 
   /// Maps instructions to their shape information. The shape information
@@ -360,8 +428,9 @@
     SmallVector<Value *, 16> SplitVecs;
     Value *Undef = UndefValue::get(VType);
     for (unsigned MaskStart = 0; MaskStart < VType->getNumElements();
-         MaskStart += SI.NumRows) {
-      Constant *Mask = createSequentialMask(Builder, MaskStart, SI.NumRows, 0);
+         MaskStart += SI.getStride()) {
+      Constant *Mask =
+          createSequentialMask(Builder, MaskStart, SI.getStride(), 0);
       Value *V = Builder.CreateShuffleVector(MatrixVal, Undef, Mask, "split");
       SplitVecs.push_back(V);
     }
@@ -711,15 +780,15 @@
     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
     MatrixTy Result;
     // Distance between start of one column and the start of the next
-    for (unsigned C = 0, E = Shape.NumColumns; C < E; ++C) {
-      Value *GEP =
-          computeColumnAddr(EltPtr, Builder.getInt32(C), Stride, Shape.NumRows,
-                            VType->getElementType(), Builder);
+    for (unsigned C = 0, E = Shape.getNumVectors(); C < E; ++C) {
+      Value *GEP = computeColumnAddr(EltPtr, Builder.getInt32(C), Stride,
+                                     Shape.getStride(), VType->getElementType(),
+                                     Builder);
       Value *Column = createColumnLoad(GEP, VType->getElementType(), Builder);
       Result.addColumn(Column);
     }
-    return Result.addNumLoads(getNumOps(Result.getColumnTy()) *
-                              Result.getNumColumns());
+    return Result.addNumLoads(getNumOps(Result.getVectorTy()) *
+                              Result.getNumVectors());
   }
 
   /// Loads a sub-matrix with shape \p ResultShape from a \p R x \p C matrix,
@@ -730,7 +799,7 @@
     Value *Offset = Builder.CreateAdd(
         Builder.CreateMul(Builder.getInt32(J),
-                          Builder.getInt32(MatrixShape.NumRows)),
+                          Builder.getInt32(MatrixShape.getStride())),
         Builder.getInt32(I));
 
     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
@@ -743,7 +812,7 @@
     Value *TilePtr =
         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
 
-    return loadMatrix(TileTy, TilePtr, Builder.getInt32(MatrixShape.NumRows),
+    return loadMatrix(TileTy, TilePtr, Builder.getInt32(MatrixShape.getStride()),
                       ResultShape, Builder);
   }
 
@@ -760,6 +829,8 @@
   ///
   /// The intrinsic loads a matrix from memory using a stride between columns.
  void LowerColumnwiseLoad(CallInst *Inst) {
+    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
+           "Intrinsic only supports column-major layout!");
     Value *Ptr = Inst->getArgOperand(0);
     Value *Stride = Inst->getArgOperand(1);
     LowerLoad(Inst, Ptr, Stride,
@@ -773,7 +844,7 @@
                    IRBuilder<> &Builder) {
     Value *Offset = Builder.CreateAdd(
         Builder.CreateMul(Builder.getInt32(J),
-                          Builder.getInt32(MatrixShape.NumRows)),
+                          Builder.getInt32(MatrixShape.getStride())),
         Builder.getInt32(I));
 
     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
@@ -787,7 +858,7 @@
         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
 
     storeMatrix(TileTy, StoreVal, TilePtr,
-                Builder.getInt32(MatrixShape.NumRows), Builder);
+                Builder.getInt32(MatrixShape.getStride()), Builder);
   }
 
   /// Store matrix \p StoreVal starting at \p Ptr and using \p Stride between
@@ -796,14 +867,14 @@
                       IRBuilder<> &Builder) {
     auto VType = cast<VectorType>(Ty);
     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
-    for (auto C : enumerate(StoreVal.columns())) {
+    for (auto C : enumerate(StoreVal.vectors())) {
       Value *GEP = computeColumnAddr(EltPtr, Builder.getInt32(C.index()),
-                                     Stride, StoreVal.getNumRows(),
+                                     Stride, StoreVal.getStride(),
                                      VType->getElementType(), Builder);
       createColumnStore(C.value(), GEP, VType->getElementType(), Builder);
     }
-    return MatrixTy().addNumStores(getNumOps(StoreVal.getColumnTy()) *
-                                   StoreVal.getNumColumns());
+    return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
+                                   StoreVal.getNumVectors());
   }
 
   /// Lower a store instruction with shape information.
@@ -820,6 +891,8 @@
   ///
   /// The intrinsic stores a matrix back to memory using a stride between columns.
   void LowerColumnwiseStore(CallInst *Inst) {
+    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
+           "Intrinsic only supports column-major layout!");
     Value *Matrix = Inst->getArgOperand(0);
     Value *Ptr = Inst->getArgOperand(1);
     Value *Stride = Inst->getArgOperand(2);
@@ -827,16 +900,6 @@
               {Inst->getArgOperand(3), Inst->getArgOperand(4)});
   }
 
-  /// Extract a column vector of \p NumElts starting at index (\p I, \p J) from
-  /// the matrix \p LM represented as a vector of column vectors.
-  Value *extractVector(const MatrixTy &LM, unsigned I, unsigned J,
-                       unsigned NumElts, IRBuilder<> &Builder) {
-    Value *Col = LM.getColumn(J);
-    Value *Undef = UndefValue::get(Col->getType());
-    Constant *Mask = createSequentialMask(Builder, I, NumElts, 0);
-    return Builder.CreateShuffleVector(Col, Undef, Mask, "block");
-  }
-
   // Set elements I..I+NumElts-1 to Block
   Value *insertVector(Value *Col, unsigned I, Value *Block,
                       IRBuilder<> &Builder) {
@@ -930,33 +993,60 @@
     unsigned C = Result.getNumColumns();
     unsigned M = A.getNumColumns();
 
-    for (unsigned J = 0; J < C; ++J) {
-      unsigned BlockSize = VF;
-
-      // If Result is zero, we don't need to accumulate in the K==0 iteration.
-      bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
-
-      unsigned NumOps = 0;
-      for (unsigned I = 0; I < R; I += BlockSize) {
-        // Gradually lower the vectorization factor to cover the remainder.
-        while (I + BlockSize > R)
-          BlockSize /= 2;
-
-        Value *Sum =
-            isTiled ? extractVector(Result, I, J, BlockSize, Builder) : nullptr;
-        for (unsigned K = 0; K < M; ++K) {
-          Value *L = extractVector(A, I, K, BlockSize, Builder);
-          Value *RH = Builder.CreateExtractElement(B.getColumn(J), K);
-          Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
-          Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
-                             Result.getElementType()->isFloatingPointTy(),
-                             Builder, AllowContraction, NumOps);
+    bool IsFP = Result.getElementType()->isFloatingPointTy();
+    assert(A.isColumnMajor() == B.isColumnMajor() && Result.isColumnMajor() == A.isColumnMajor());
+    unsigned NumComputeOps = 0;
+    // Multiply columns from the first operand with scalars from the second
+    // operand. Then move along the K axis and accumulate the columns. With
+    // this the adds can be vectorized without reassociation.
+    if (A.isColumnMajor()) {
+      for (unsigned J = 0; J < C; ++J) {
+        unsigned BlockSize = VF;
+        // If Result is zero, we don't need to accumulate in the K==0 iteration.
+        bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
+
+        for (unsigned I = 0; I < R; I += BlockSize) {
+          // Gradually lower the vectorization factor to cover the remainder.
+          while (I + BlockSize > R)
+            BlockSize /= 2;
+
+          Value *Sum = isTiled ? Result.extractVector(I, J, BlockSize, Builder)
+                               : nullptr;
+          for (unsigned K = 0; K < M; ++K) {
+            Value *L = A.extractVector(I, K, BlockSize, Builder);
+            Value *RH = Builder.CreateExtractElement(B.getColumn(J), K);
+            Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
+            Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
+                               Result.getElementType()->isFloatingPointTy(),
+                               Builder, AllowContraction, NumComputeOps);
+          }
+          Result.setColumn(J,
+                           insertVector(Result.getVector(J), I, Sum, Builder));
+        }
+      }
+    } else {
+      for (unsigned I = 0; I < R; ++I) {
+        unsigned BlockSize = VF;
+        bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
+        for (unsigned J = 0; J < C; J += BlockSize) {
+          // Gradually lower the vectorization factor to cover the remainder.
+          while (J + BlockSize > C)
+            BlockSize /= 2;
+
+          Value *Sum = nullptr;
+          for (unsigned K = 0; K < M; ++K) {
+            Value *R = B.extractVector(K, J, BlockSize, Builder);
+            Value *LH = Builder.CreateExtractElement(A.getVector(I), K);
+            Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat");
+            Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, R,
+                               IsFP, Builder, AllowContraction, NumComputeOps);
+          }
+          Result.setColumn(I,
+                           insertVector(Result.getVector(I), J, Sum, Builder));
         }
-        Result.setColumn(J, insertVector(Result.getColumn(J), I, Sum, Builder));
       }
-
-      Result.addNumComputeOps(NumOps);
     }
+    Result.addNumComputeOps(NumComputeOps);
   }
 
   /// Ensure that the memory in \p Load does not alias \p Store by potentially
@@ -1077,6 +1167,7 @@
   BasicBlock *emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0,
                              LoadInst *LoadOp1, StoreInst *Store) {
+    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor && "Tiling only supported for column-major matrixes at the moment!");
     if (!isFusionProfitable(MatMul))
       return nullptr;
 
@@ -1127,7 +1218,7 @@
   /// No need to return LoweredMatrix since the single store user will be
   /// lowered as part of this.
  BasicBlock *LowerMatrixMultiplyFused(CallInst *MatMul) {
-    if (!FuseMatrix)
+    if (!FuseMatrix || MatrixLayout != MatrixLayoutTy::ColumnMajor)
       return nullptr;
 
     if (auto *LoadOp0 = dyn_cast<LoadInst>(MatMul->getOperand(0)))
@@ -1162,9 +1253,7 @@
     assert(LShape.NumColumns == RShape.NumRows);
 
     // Initialize the output
-    MatrixTy Result;
-    for (unsigned J = 0; J < C; ++J)
-      Result.addColumn(UndefValue::get(VectorType::get(EltType, R)));
+    MatrixTy Result(R, C, EltType);
 
     bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
                                                   MatMul->hasAllowContract());
@@ -1180,6 +1269,8 @@
     VectorType *VectorTy = cast<VectorType>(InputVal->getType());
     ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2));
     MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder);
+    assert(InputMatrix.isColumnMajor() &&
+           "Row-major code-gen not supported yet!");
 
     for (unsigned Row = 0; Row < ArgShape.NumRows; ++Row) {
       // Build a single column vector for this row. First initialize it.
@@ -1213,7 +1304,7 @@
     if (I == ShapeMap.end())
       return false;
 
-    LowerLoad(Inst, Ptr, Builder.getInt32(I->second.NumRows), I->second);
+    LowerLoad(Inst, Ptr, Builder.getInt32(I->second.getStride()), I->second);
     return true;
   }
 
@@ -1223,7 +1314,8 @@
     if (I == ShapeMap.end())
       return false;
 
-    LowerStore(Inst, StoredVal, Ptr, Builder.getInt32(I->second.NumRows), I->second);
+    LowerStore(Inst, StoredVal, Ptr, Builder.getInt32(I->second.getStride()),
+               I->second);
     return true;
   }
 
@@ -1262,13 +1354,14 @@
         llvm_unreachable("Unsupported binary operator for matrix");
       }
     };
-    for (unsigned C = 0; C < Shape.NumColumns; ++C)
+
+    for (unsigned C = 0; C < Shape.getNumVectors(); ++C)
       Result.addColumn(
-          BuildColumnOp(LoweredLhs.getColumn(C), LoweredRhs.getColumn(C)));
+          BuildColumnOp(LoweredLhs.getVector(C), LoweredRhs.getVector(C)));
 
     finalizeLowering(Inst,
-                     Result.addNumComputeOps(getNumOps(Result.getColumnTy()) *
-                                             Result.getNumColumns()),
+                     Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
+                                             Result.getNumVectors()),
                      Builder);
     return true;
   }
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-double-row-major.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-double-row-major.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-double-row-major.ll
@@ -0,0 +1,256 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --verbose
+
+; RUN: opt -lower-matrix-intrinsics -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=RM %s
+
+
+
+define <4 x double> @multiply_2x2(<4 x double> %a, <4 x double> %b) {
+; RM-LABEL: @multiply_2x2(
+; RM-NEXT: entry:
+; RM-NEXT: [[SPLIT:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> undef, <2 x i32>
+; RM-NEXT: [[SPLIT1:%.*]] = shufflevector <4 x double> [[A]], <4 x double> undef, <2 x i32>
+; RM-NEXT: [[SPLIT2:%.*]] = shufflevector <4 x double> [[B:%.*]], <4 x double> undef, <2 x i32>
+; RM-NEXT: [[SPLIT3:%.*]] = shufflevector <4 x double> [[B]], <4 x double> undef, <2 x i32>
+; RM-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT: [[TMP0:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0
+; RM-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> undef, double [[TMP0]], i32 0
+; RM-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT: [[TMP1:%.*]] = fmul <1 x double> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT: [[BLOCK4:%.*]] = shufflevector <2 x double>
[[SPLIT3]], <2 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT5:%.*]] = insertelement <1 x double> undef, double [[TMP2]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT6:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT5]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP3:%.*]] = fmul <1 x double> [[SPLAT_SPLAT6]], [[BLOCK4]] +; RM-NEXT: [[TMP4:%.*]] = fadd <1 x double> [[TMP1]], [[TMP3]] +; RM-NEXT: [[TMP5:%.*]] = shufflevector <1 x double> [[TMP4]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP5]], <2 x i32> +; RM-NEXT: [[BLOCK7:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> +; RM-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT8:%.*]] = insertelement <1 x double> undef, double [[TMP7]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT9:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT8]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP8:%.*]] = fmul <1 x double> [[SPLAT_SPLAT9]], [[BLOCK7]] +; RM-NEXT: [[BLOCK10:%.*]] = shufflevector <2 x double> [[SPLIT3]], <2 x double> undef, <1 x i32> +; RM-NEXT: [[TMP9:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT11:%.*]] = insertelement <1 x double> undef, double [[TMP9]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT12:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT11]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP10:%.*]] = fmul <1 x double> [[SPLAT_SPLAT12]], [[BLOCK10]] +; RM-NEXT: [[TMP11:%.*]] = fadd <1 x double> [[TMP8]], [[TMP10]] +; RM-NEXT: [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> [[TMP12]], <2 x i32> +; RM-NEXT: [[BLOCK13:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT14:%.*]] = insertelement <1 x double> undef, double [[TMP14]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT15:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT14]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP15:%.*]] = fmul <1 x double> [[SPLAT_SPLAT15]], [[BLOCK13]] +; RM-NEXT: [[BLOCK16:%.*]] = shufflevector <2 x double> [[SPLIT3]], <2 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP16:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT17:%.*]] = insertelement <1 x double> undef, double [[TMP16]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT18:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT17]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP17:%.*]] = fmul <1 x double> [[SPLAT_SPLAT18]], [[BLOCK16]] +; RM-NEXT: [[TMP18:%.*]] = fadd <1 x double> [[TMP15]], [[TMP17]] +; RM-NEXT: [[TMP19:%.*]] = shufflevector <1 x double> [[TMP18]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP20:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP19]], <2 x i32> +; RM-NEXT: [[BLOCK19:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> +; RM-NEXT: [[TMP21:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <1 x double> undef, double [[TMP21]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT21:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT20]], <1 x double> undef, <1 x i32> 
zeroinitializer +; RM-NEXT: [[TMP22:%.*]] = fmul <1 x double> [[SPLAT_SPLAT21]], [[BLOCK19]] +; RM-NEXT: [[BLOCK22:%.*]] = shufflevector <2 x double> [[SPLIT3]], <2 x double> undef, <1 x i32> +; RM-NEXT: [[TMP23:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> undef, double [[TMP23]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP24:%.*]] = fmul <1 x double> [[SPLAT_SPLAT24]], [[BLOCK22]] +; RM-NEXT: [[TMP25:%.*]] = fadd <1 x double> [[TMP22]], [[TMP24]] +; RM-NEXT: [[TMP26:%.*]] = shufflevector <1 x double> [[TMP25]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP27:%.*]] = shufflevector <2 x double> [[TMP20]], <2 x double> [[TMP26]], <2 x i32> +; RM-NEXT: [[TMP28:%.*]] = shufflevector <2 x double> [[TMP13]], <2 x double> [[TMP27]], <4 x i32> +; RM-NEXT: ret <4 x double> [[TMP28]] +; +entry: + %c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2) + ret <4 x double> %c +} + +declare <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double>, <4 x double>, i32, i32, i32) + +define <4 x double> @multiply_1x2(<2 x double> %a, <2 x double> %b) { + +; RM-LABEL: @multiply_1x2( +; RM-NEXT: entry: +; RM-NEXT: [[SPLIT:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[SPLIT1:%.*]] = shufflevector <2 x double> [[A]], <2 x double> undef, <1 x i32> +; RM-NEXT: [[SPLIT2:%.*]] = shufflevector <2 x double> [[B:%.*]], <2 x double> undef, <2 x i32> +; RM-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP0:%.*]] = extractelement <1 x double> [[SPLIT]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> undef, double [[TMP0]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP1:%.*]] = fmul <1 x double> [[SPLAT_SPLAT]], [[BLOCK]] +; RM-NEXT: [[TMP2:%.*]] = shufflevector <1 x double> [[TMP1]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP2]], <2 x i32> +; RM-NEXT: [[BLOCK3:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> +; RM-NEXT: [[TMP4:%.*]] = extractelement <1 x double> [[SPLIT]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT4:%.*]] = insertelement <1 x double> undef, double [[TMP4]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT5:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT4]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP5:%.*]] = fmul <1 x double> [[SPLAT_SPLAT5]], [[BLOCK3]] +; RM-NEXT: [[TMP6:%.*]] = shufflevector <1 x double> [[TMP5]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[TMP6]], <2 x i32> +; RM-NEXT: [[BLOCK6:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP8:%.*]] = extractelement <1 x double> [[SPLIT1]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT7:%.*]] = insertelement <1 x double> undef, double [[TMP8]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT8:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT7]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP9:%.*]] = fmul <1 x double> [[SPLAT_SPLAT8]], [[BLOCK6]] +; RM-NEXT: [[TMP10:%.*]] = shufflevector <1 x double> 
[[TMP9]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP11:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP10]], <2 x i32> +; RM-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> +; RM-NEXT: [[TMP12:%.*]] = extractelement <1 x double> [[SPLIT1]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x double> undef, double [[TMP12]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT10]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP13:%.*]] = fmul <1 x double> [[SPLAT_SPLAT11]], [[BLOCK9]] +; RM-NEXT: [[TMP14:%.*]] = shufflevector <1 x double> [[TMP13]], <1 x double> undef, <2 x i32> +; RM-NEXT: [[TMP15:%.*]] = shufflevector <2 x double> [[TMP11]], <2 x double> [[TMP14]], <2 x i32> +; RM-NEXT: [[TMP16:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> [[TMP15]], <4 x i32> +; RM-NEXT: ret <4 x double> [[TMP16]] +; +entry: + %c = call <4 x double> @llvm.matrix.multiply.v4f64.v2f64.v2f64(<2 x double> %a, <2 x double> %b, i32 2, i32 1, i32 2) + ret <4 x double> %c +} + +declare <4 x double> @llvm.matrix.multiply.v4f64.v2f64.v2f64(<2 x double>, <2 x double>, i32, i32, i32) + +define <9 x double> @multiply_2x3(<6 x double> %a, <6 x double> %b) { +; RM-LABEL: @multiply_2x3( +; RM-NEXT: entry: +; RM-NEXT: [[SPLIT:%.*]] = shufflevector <6 x double> [[A:%.*]], <6 x double> undef, <2 x i32> +; RM-NEXT: [[SPLIT1:%.*]] = shufflevector <6 x double> [[A]], <6 x double> undef, <2 x i32> +; RM-NEXT: [[SPLIT2:%.*]] = shufflevector <6 x double> [[A]], <6 x double> undef, <2 x i32> +; RM-NEXT: [[SPLIT3:%.*]] = shufflevector <6 x double> [[B:%.*]], <6 x double> undef, <3 x i32> +; RM-NEXT: [[SPLIT4:%.*]] = shufflevector <6 x double> [[B]], <6 x double> undef, <3 x i32> +; RM-NEXT: [[BLOCK:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP0:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> undef, double [[TMP0]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP1:%.*]] = fmul <1 x double> [[SPLAT_SPLAT]], [[BLOCK]] +; RM-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x double> undef, double [[TMP2]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT6]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP3:%.*]] = fmul <1 x double> [[SPLAT_SPLAT7]], [[BLOCK5]] +; RM-NEXT: [[TMP4:%.*]] = fadd <1 x double> [[TMP1]], [[TMP3]] +; RM-NEXT: [[TMP5:%.*]] = shufflevector <1 x double> [[TMP4]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP6:%.*]] = shufflevector <3 x double> undef, <3 x double> [[TMP5]], <3 x i32> +; RM-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <1 x double> undef, double [[TMP7]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT9]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP8:%.*]] = fmul <1 x double> [[SPLAT_SPLAT10]], [[BLOCK8]] +; RM-NEXT: [[BLOCK11:%.*]] = shufflevector 
<3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP9:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT12:%.*]] = insertelement <1 x double> undef, double [[TMP9]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT13:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT12]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP10:%.*]] = fmul <1 x double> [[SPLAT_SPLAT13]], [[BLOCK11]] +; RM-NEXT: [[TMP11:%.*]] = fadd <1 x double> [[TMP8]], [[TMP10]] +; RM-NEXT: [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP13:%.*]] = shufflevector <3 x double> [[TMP6]], <3 x double> [[TMP12]], <3 x i32> +; RM-NEXT: [[BLOCK14:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <1 x double> undef, double [[TMP14]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT15]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP15:%.*]] = fmul <1 x double> [[SPLAT_SPLAT16]], [[BLOCK14]] +; RM-NEXT: [[BLOCK17:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP16:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <1 x double> undef, double [[TMP16]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT18]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP17:%.*]] = fmul <1 x double> [[SPLAT_SPLAT19]], [[BLOCK17]] +; RM-NEXT: [[TMP18:%.*]] = fadd <1 x double> [[TMP15]], [[TMP17]] +; RM-NEXT: [[TMP19:%.*]] = shufflevector <1 x double> [[TMP18]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP20:%.*]] = shufflevector <3 x double> [[TMP13]], <3 x double> [[TMP19]], <3 x i32> +; RM-NEXT: [[BLOCK20:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP21:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT21:%.*]] = insertelement <1 x double> undef, double [[TMP21]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT22:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT21]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP22:%.*]] = fmul <1 x double> [[SPLAT_SPLAT22]], [[BLOCK20]] +; RM-NEXT: [[BLOCK23:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP23:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT24:%.*]] = insertelement <1 x double> undef, double [[TMP23]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT25:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT24]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP24:%.*]] = fmul <1 x double> [[SPLAT_SPLAT25]], [[BLOCK23]] +; RM-NEXT: [[TMP25:%.*]] = fadd <1 x double> [[TMP22]], [[TMP24]] +; RM-NEXT: [[TMP26:%.*]] = shufflevector <1 x double> [[TMP25]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP27:%.*]] = shufflevector <3 x double> undef, <3 x double> [[TMP26]], <3 x i32> +; RM-NEXT: [[BLOCK26:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP28:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <1 x double> undef, double [[TMP28]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT28:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT27]], <1 x 
double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP29:%.*]] = fmul <1 x double> [[SPLAT_SPLAT28]], [[BLOCK26]] +; RM-NEXT: [[BLOCK29:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP30:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT30:%.*]] = insertelement <1 x double> undef, double [[TMP30]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT31:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT30]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP31:%.*]] = fmul <1 x double> [[SPLAT_SPLAT31]], [[BLOCK29]] +; RM-NEXT: [[TMP32:%.*]] = fadd <1 x double> [[TMP29]], [[TMP31]] +; RM-NEXT: [[TMP33:%.*]] = shufflevector <1 x double> [[TMP32]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP34:%.*]] = shufflevector <3 x double> [[TMP27]], <3 x double> [[TMP33]], <3 x i32> +; RM-NEXT: [[BLOCK32:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP35:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <1 x double> undef, double [[TMP35]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT34:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT33]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP36:%.*]] = fmul <1 x double> [[SPLAT_SPLAT34]], [[BLOCK32]] +; RM-NEXT: [[BLOCK35:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP37:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT36:%.*]] = insertelement <1 x double> undef, double [[TMP37]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT37:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT36]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP38:%.*]] = fmul <1 x double> [[SPLAT_SPLAT37]], [[BLOCK35]] +; RM-NEXT: [[TMP39:%.*]] = fadd <1 x double> [[TMP36]], [[TMP38]] +; RM-NEXT: [[TMP40:%.*]] = shufflevector <1 x double> [[TMP39]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP41:%.*]] = shufflevector <3 x double> [[TMP34]], <3 x double> [[TMP40]], <3 x i32> +; RM-NEXT: [[BLOCK38:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP42:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <1 x double> undef, double [[TMP42]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT40:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT39]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP43:%.*]] = fmul <1 x double> [[SPLAT_SPLAT40]], [[BLOCK38]] +; RM-NEXT: [[BLOCK41:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP44:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT42:%.*]] = insertelement <1 x double> undef, double [[TMP44]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT43:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT42]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP45:%.*]] = fmul <1 x double> [[SPLAT_SPLAT43]], [[BLOCK41]] +; RM-NEXT: [[TMP46:%.*]] = fadd <1 x double> [[TMP43]], [[TMP45]] +; RM-NEXT: [[TMP47:%.*]] = shufflevector <1 x double> [[TMP46]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP48:%.*]] = shufflevector <3 x double> undef, <3 x double> [[TMP47]], <3 x i32> +; RM-NEXT: [[BLOCK44:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP49:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 0 +; RM-NEXT: 
[[SPLAT_SPLATINSERT45:%.*]] = insertelement <1 x double> undef, double [[TMP49]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT46:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT45]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP50:%.*]] = fmul <1 x double> [[SPLAT_SPLAT46]], [[BLOCK44]] +; RM-NEXT: [[BLOCK47:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP51:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT48:%.*]] = insertelement <1 x double> undef, double [[TMP51]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT49:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT48]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP52:%.*]] = fmul <1 x double> [[SPLAT_SPLAT49]], [[BLOCK47]] +; RM-NEXT: [[TMP53:%.*]] = fadd <1 x double> [[TMP50]], [[TMP52]] +; RM-NEXT: [[TMP54:%.*]] = shufflevector <1 x double> [[TMP53]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP55:%.*]] = shufflevector <3 x double> [[TMP48]], <3 x double> [[TMP54]], <3 x i32> +; RM-NEXT: [[BLOCK50:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP56:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 0 +; RM-NEXT: [[SPLAT_SPLATINSERT51:%.*]] = insertelement <1 x double> undef, double [[TMP56]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT52:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT51]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP57:%.*]] = fmul <1 x double> [[SPLAT_SPLAT52]], [[BLOCK50]] +; RM-NEXT: [[BLOCK53:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> +; RM-NEXT: [[TMP58:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 1 +; RM-NEXT: [[SPLAT_SPLATINSERT54:%.*]] = insertelement <1 x double> undef, double [[TMP58]], i32 0 +; RM-NEXT: [[SPLAT_SPLAT55:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT54]], <1 x double> undef, <1 x i32> zeroinitializer +; RM-NEXT: [[TMP59:%.*]] = fmul <1 x double> [[SPLAT_SPLAT55]], [[BLOCK53]] +; RM-NEXT: [[TMP60:%.*]] = fadd <1 x double> [[TMP57]], [[TMP59]] +; RM-NEXT: [[TMP61:%.*]] = shufflevector <1 x double> [[TMP60]], <1 x double> undef, <3 x i32> +; RM-NEXT: [[TMP62:%.*]] = shufflevector <3 x double> [[TMP55]], <3 x double> [[TMP61]], <3 x i32> +; RM-NEXT: [[TMP63:%.*]] = shufflevector <3 x double> [[TMP20]], <3 x double> [[TMP41]], <6 x i32> +; RM-NEXT: [[TMP64:%.*]] = shufflevector <3 x double> [[TMP62]], <3 x double> undef, <6 x i32> +; RM-NEXT: [[TMP65:%.*]] = shufflevector <6 x double> [[TMP63]], <6 x double> [[TMP64]], <9 x i32> +; RM-NEXT: ret <9 x double> [[TMP65]] +; +entry: + %c = call <9 x double> @llvm.matrix.multiply.v6f64.v6f64.v6f64(<6 x double> %a, <6 x double> %b, i32 3, i32 2, i32 3) + ret <9 x double> %c +} + +declare <9 x double> @llvm.matrix.multiply.v6f64.v6f64.v6f64(<6 x double>, <6 x double>, i32, i32, i32)
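To make the two layouts concrete: under -matrix-default-layout=row-major the lowering splits a flattened operand into one vector per row rather than per column. A minimal sketch for the 2x3 operand %b of @multiply_2x3 above (not part of the patch; the %b.row* names are hypothetical and the shuffle masks are written out here only for illustration):

  ; %b holds a 2 x 3 matrix flattened row-major, so each row is 3 contiguous elements.
  %b.row0 = shufflevector <6 x double> %b, <6 x double> undef, <3 x i32> <i32 0, i32 1, i32 2>
  %b.row1 = shufflevector <6 x double> %b, <6 x double> undef, <3 x i32> <i32 3, i32 4, i32 5>

The row-major branch added to emitMatrixMultiply above then accumulates each result row as the sum over k of splat(a[i][k]) multiplied with row k of %b, mirroring the column-major scheme with the roles of the two operands swapped.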