diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc @@ -1,3 +1,8 @@ +ods_def: +def matmul(A: f32(M, K), B: f32(K, N)) -> (C: f32(M, N)) { + C(m, n) = std_addf(std_mulf(A(m, k), B(k, n))); +} + ods_def: def batch_matmul(A: f32(Batch, M, K), B: f32(Batch, K, N)) -> (C: f32(Batch, M, N)) { C(b, m, n) = std_addf(std_mulf(A(b, m, k), B(b, k, n))); diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td @@ -225,36 +225,6 @@ let hasFolder = 1; } -def MatmulOp : LinalgStructured_Op<"matmul", [NInputs<2>, NOutputs<1>]> { - - let arguments = (ins AnyStridedMemRefOfRank<2>, - AnyStridedMemRefOfRank<2>, - AnyStridedMemRefOfRank<2>); - - let extraClassDeclaration = libraryCallName # [{ - llvm::Optional> referenceIterators() { - return SmallVector{ - getParallelIteratorTypeName(), - getParallelIteratorTypeName(), - getReductionIteratorTypeName()}; - } - - // A(i, r_k) * B(r_k, j) -> C(i, j) - llvm::Optional> referenceIndexingMaps() { - MLIRContext *context = getContext(); - AffineExpr i, j, r_k; - bindDims(context, i, j, r_k); - return SmallVector{ - AffineMap::get(3, 0, {i, r_k}, context), - AffineMap::get(3, 0, {r_k, j},context), - AffineMap::get(3, 0, {i, j}, context) - }; - } - }]; - - let hasFolder = 1; -} - /// A base class for pooling operation such as conv. The arguments must contain /// optional arguments `strides`, `dilations` and `padding` with following type: /// OptionalAttr:$strides diff --git a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp --- a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp +++ b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp @@ -541,8 +541,11 @@ LinalgOpConversion, LinalgOpConversion, LinalgOpConversion, - LinalgOpConversion, LinalgOpConversion>(ctx); + // TODO: collect all auto-generated named ops with a tblgen directive. + patterns.insert< + LinalgOpConversion, + LinalgOpConversion>(ctx); // clang-format on } diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -993,10 +993,6 @@ SmallVectorImpl &) { return foldMemRefCast(*this); } -LogicalResult MatmulOp::fold(ArrayRef, - SmallVectorImpl &) { - return foldMemRefCast(*this); -} OpFoldResult ReshapeOp::fold(ArrayRef) { if (succeeded(foldMemRefCast(*this))) return getResult(); @@ -1055,7 +1051,7 @@ p << op.getOperationName() << ' '; p.printOptionalAttrDict(op.getAttrs(), silentAttrNames); p << ' ' << op.getOperands(); - p << ": (" << op.getOperandTypes() << ")"; + p << " : (" << op.getOperandTypes() << ")"; auto outputTensorTypes = op.getResultTypes(); if (!outputTensorTypes.empty()) p << " -> (" << outputTensorTypes << ")"; @@ -1067,8 +1063,8 @@ SmallVector operandsInfo; // Optional attributes may be added. 
- if (parser.parseOptionalAttrDict(result.attributes) || - parser.parseOperandList(operandsInfo)) + if (parser.parseOperandList(operandsInfo) || + parser.parseOptionalAttrDict(result.attributes)) return failure(); SmallVector operandTypes; @@ -1104,3 +1100,7 @@ SmallVectorImpl &) { return foldMemRefCast(*this); } +LogicalResult MatmulOp::fold(ArrayRef, + SmallVectorImpl &) { + return foldMemRefCast(*this); +} diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp @@ -270,22 +270,6 @@ } }; -template -class LinalgScopedEmitter { -public: - static void emitScalarImplementation(ArrayRef allIvs, - MatmulOp matmulOp) { - assert(matmulOp.hasBufferSemantics() && - "expected linalg op with buffer semantics"); - assert(allIvs.size() == 3); - Value i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]); - IndexedValueType A(matmulOp.getInput(0)), B(matmulOp.getInput(1)), - C(matmulOp.getOutputBuffer(0)); - // Emit scalar form. - C(i, j) = C(i, j) + A(i, r_k) * B(r_k, j); - } -}; - template class LinalgScopedEmitter { public: @@ -790,7 +774,6 @@ INSTANTIATE_LINALG_OP_TO_LOOPS(FillOp) INSTANTIATE_LINALG_OP_TO_LOOPS(DotOp) INSTANTIATE_LINALG_OP_TO_LOOPS(MatvecOp) -INSTANTIATE_LINALG_OP_TO_LOOPS(MatmulOp) INSTANTIATE_LINALG_OP_TO_LOOPS(ConvOp) INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingMaxOp) INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingMinOp) diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir --- a/mlir/test/Dialect/Linalg/affine.mlir +++ b/mlir/test/Dialect/Linalg/affine.mlir @@ -16,7 +16,7 @@ %A = view %arg0[%c0][%M, %K] : memref to memref %B = view %arg0[%c0][%K, %N] : memref to memref %C = view %arg0[%c0][%M, %N] : memref to memref - linalg.matmul(%A, %B, %C) : memref, memref, memref + linalg.matmul %A, %B, %C : (memref, memref, memref) return } diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir --- a/mlir/test/Dialect/Linalg/canonicalize.mlir +++ b/mlir/test/Dialect/Linalg/canonicalize.mlir @@ -14,7 +14,7 @@ // CHECK: linalg.slice {{.*}} : memref<16x16xf32>, !linalg.range, !linalg.range, memref %4 = linalg.slice %3[%r0, %r0] : memref, !linalg.range, !linalg.range, memref - // CHECK: linalg.matmul{{.*}}: memref<16x16xf32>, memref<16x16xf32>, memref<16x16xf32> - linalg.matmul(%3, %3, %3) : memref, memref, memref + // CHECK: linalg.matmul{{.*}}: (memref<16x16xf32>, memref<16x16xf32>, memref<16x16xf32>) + linalg.matmul %3, %3, %3 : (memref, memref, memref) return %4: memref } diff --git a/mlir/test/Dialect/Linalg/fusion-2-level.mlir b/mlir/test/Dialect/Linalg/fusion-2-level.mlir --- a/mlir/test/Dialect/Linalg/fusion-2-level.mlir +++ b/mlir/test/Dialect/Linalg/fusion-2-level.mlir @@ -12,7 +12,7 @@ %0 = dim %C, 0 : memref %1 = dim %C, 1 : memref %2 = dim %D, 1 : memref - linalg.matmul(%A, %B, %C) : memref, memref, memref + linalg.matmul %A, %B, %C : (memref, memref, memref) loop.for %arg5 = %c0 to %0 step %c20 { loop.for %arg6 = %c0 to %2 step %c30 { loop.for %arg7 = %c0 to %1 step %c40 { @@ -28,7 +28,7 @@ %14 = std.subview %5[%arg8, %arg10][%c2, %c4][%c1, %c1] : memref to memref %16 = std.subview %7[%arg10, %arg9][%c4, %c3][%c1, %c1]: memref to memref %17 = std.subview %8[%arg8, %arg9][%c2, %c4][%c1, %c1] : memref to memref - linalg.matmul(%14, %16, %17) : memref, memref, memref + linalg.matmul %14, %16, %17 : (memref, memref, memref) } } } diff --git a/mlir/test/Dialect/Linalg/fusion.mlir 
b/mlir/test/Dialect/Linalg/fusion.mlir --- a/mlir/test/Dialect/Linalg/fusion.mlir +++ b/mlir/test/Dialect/Linalg/fusion.mlir @@ -13,10 +13,10 @@ %0 = dim %A, 0 : memref %1 = dim %A, 1 : memref %2 = dim %B, 1 : memref - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) %c1 = constant 1 : index loop.for %arg5 = %c0 to %0 step %c2 { loop.for %arg6 = %c0 to %2 step %c3 { @@ -30,10 +30,10 @@ %8 = std.subview %C[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%5, %7, %8) : - memref, - memref, - memref + linalg.matmul %5, %7, %8 : + (memref, + memref, + memref) } } } @@ -61,10 +61,10 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) %0 = dim %C, 0 : memref %1 = dim %C, 1 : memref %2 = dim %D, 1 : memref @@ -80,10 +80,10 @@ %8 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%5, %7, %8) : - memref, - memref, - memref + linalg.matmul %5, %7, %8 : + (memref, + memref, + memref) } } } @@ -113,10 +113,10 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) %0 = dim %D, 0 : memref %1 = dim %D, 1 : memref %2 = dim %C, 1 : memref @@ -132,10 +132,10 @@ %8 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%5, %7, %8) : - memref, - memref, - memref + linalg.matmul %5, %7, %8 : + (memref, + memref, + memref) } } } @@ -165,14 +165,14 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref - linalg.matmul(%A, %B, %D) : - memref, - memref, - memref + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) + linalg.matmul %A, %B, %D : + (memref, + memref, + memref) %0 = dim %C, 0 : memref %1 = dim %C, 1 : memref %2 = dim %D, 1 : memref @@ -188,10 +188,10 @@ %8 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%5, %7, %8) : - memref, - memref, - memref + linalg.matmul %5, %7, %8 : + (memref, + memref, + memref) } } } @@ -227,14 +227,14 @@ %0 = dim %B, 1 : memref %1 = dim %D, 0 : memref %2 = dim %D, 1 : memref - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref - linalg.matmul(%C, %B, %D) : - memref, - memref, - memref + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) + linalg.matmul %C, %B, %D : + (memref, + memref, + memref) loop.for %arg5 = %c0 to %1 step %c2 { loop.for %arg6 = %c0 to %0 step %c3 { loop.for %arg7 = %c0 to %2 step %c4 { @@ -247,10 +247,10 @@ %8 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%5, %7, %8) : - memref, - memref, - memref + linalg.matmul %5, %7, %8 : + (memref, + memref, + memref) } } } @@ -275,9 +275,9 @@ // CHECK-DAG: %[[A_I0:.*]] = subview %[[A]][%[[I]], %{{.*}}] // CHECK-DAG: %[[B_00:.*]] = subview %[[B]][%{{.*}}, %{{.*}}] // CHECK-DAG: %[[C_I0_:.*]] = subview %[[C]][%[[I]], %{{.*}}] -// CHECK: linalg.matmul(%[[A_I0]], %[[B_00]], %[[C_I0_]]) -// CHECK: linalg.matmul(%[[C_I0]], %[[B_0K]], %[[D_IK_]]) -// CHECK: linalg.matmul(%[[D_IK]], %[[B_KJ]], %[[E_IJ]]) +// CHECK: linalg.matmul %[[A_I0]], %[[B_00]], %[[C_I0_]] +// CHECK: linalg.matmul %[[C_I0]], %[[B_0K]], %[[D_IK_]] +// CHECK: linalg.matmul %[[D_IK]], %[[B_KJ]], %[[E_IJ]] // ----- @@ -297,14 +297,14 @@ %c3 = 
constant 3 : index %c2 = constant 2 : index %0 = dim %C, 1 : memref - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref - linalg.matmul(%A, %C, %E) : - memref, - memref, - memref + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) + linalg.matmul %A, %C, %E : + (memref, + memref, + memref) %1 = dim %C, 0 : memref %2 = dim %D, 1 : memref loop.for %arg5 = %c0 to %1 step %c2 { @@ -322,10 +322,10 @@ %8 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%5, %7, %8) : - memref, - memref, - memref + linalg.matmul %5, %7, %8 : + (memref, + memref, + memref) } } } @@ -359,14 +359,14 @@ %2 = dim %C, 1 : memref %3 = dim %C, 0 : memref %4 = dim %D, 1 : memref - linalg.matmul(%A, %C, %E) : - memref, - memref, - memref - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref + linalg.matmul %A, %C, %E : + (memref, + memref, + memref) + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) loop.for %arg5 = %c0 to %0 step %c2 { loop.for %arg6 = %c0 to %2 step %c3 { loop.for %arg7 = %c0 to %1 step %c4 { @@ -379,10 +379,10 @@ %10 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%7, %9, %10) : - memref, - memref, - memref + linalg.matmul %7, %9, %10 : + (memref, + memref, + memref) } } } @@ -398,10 +398,10 @@ %10 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%7, %9, %10) : - memref, - memref, - memref + linalg.matmul %7, %9, %10 : + (memref, + memref, + memref) } } } @@ -414,7 +414,7 @@ // CHECK: %[[C_1:.*]] = dim %[[C]], 1 : memref // CHECK: %[[C_0:.*]] = dim %[[C]], 0 : memref // CHECK: %[[D_1:.*]] = dim %[[D]], 1 : memref -// CHECK: linalg.matmul(%[[A]], %[[C]], %[[E]]) +// CHECK: linalg.matmul %[[A]], %[[C]], %[[E]] // CHECK: loop.for %{{.*}} = %{{.*}} to %[[A_0]] step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %[[A_1]] step %{{.*}} { @@ -445,14 +445,14 @@ %c2 = constant 2 : index %0 = dim %A, 0 : memref %1 = dim %A, 1 : memref - linalg.matmul(%A, %C, %D) : - memref, - memref, - memref - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref + linalg.matmul %A, %C, %D : + (memref, + memref, + memref) + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) %2 = dim %D, 1 : memref loop.for %arg5 = %c0 to %0 step %c2 { loop.for %arg6 = %c0 to %2 step %c3 { @@ -469,10 +469,10 @@ %8 = std.subview %E[%arg5, %arg6][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%5, %7, %8) : - memref, - memref, - memref + linalg.matmul %5, %7, %8 : + (memref, + memref, + memref) } } } @@ -742,10 +742,10 @@ %B = alloca(%dim, %dim)[%s0, %s1] : memref %C = alloc(%dim, %dim)[%s0, %s1] : memref - linalg.matmul(%A, %B, %C) : - memref, - memref, - memref + linalg.matmul %A, %B, %C : + (memref, + memref, + memref) loop.for %i = %c0 to %dim step %c2 { loop.for %j = %c0 to %dim step %c3 { @@ -759,10 +759,10 @@ %2 = std.subview %C[%i, %j][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%0, %1, %2) : - memref, - memref, - memref + linalg.matmul %0, %1, %2 : + (memref, + memref, + memref) } } } diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir --- a/mlir/test/Dialect/Linalg/loops.mlir +++ b/mlir/test/Dialect/Linalg/loops.mlir @@ -33,7 +33,7 @@ %A = view %arg0[%c0][%M, %K] : memref to memref %B = view %arg0[%c0][%K, %N] : memref to memref %C = view %arg0[%c0][%M, %N] : memref to memref - linalg.matmul(%A, %B, %C) : memref, memref, memref + linalg.matmul %A, %B, %C : (memref, memref, 
memref) return } // CHECKLOOP-LABEL: func @matmul(%{{.*}}: memref, diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir --- a/mlir/test/Dialect/Linalg/promote.mlir +++ b/mlir/test/Dialect/Linalg/promote.mlir @@ -24,10 +24,16 @@ loop.for %arg4 = %c0 to %6 step %c2 { loop.for %arg5 = %c0 to %8 step %c3 { loop.for %arg6 = %c0 to %7 step %c4 { - %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : memref to memref - %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : memref to memref - %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%11, %14, %17) : memref, memref, memref + %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : + memref to memref + %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : + memref to memref + %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : + memref to memref + linalg.matmul %11, %14, %17 : + (memref, + memref, + memref) } } } @@ -64,7 +70,8 @@ // CHECK: linalg.copy(%[[vB]], %[[partialB]]) : memref, memref // CHECK: linalg.copy(%[[vC]], %[[partialC]]) : memref, memref // -// CHECK: linalg.matmul(%[[fullA]], %[[fullB]], %[[fullC]]) : memref, memref, memref +// CHECK: linalg.matmul %[[fullA]], %[[fullB]], %[[fullC]] : +// CHECK-SAME: (memref, memref, memref) // // CHECK: linalg.copy(%[[partialC]], %[[vC]]) : memref, memref // @@ -89,10 +96,16 @@ loop.for %arg4 = %c0 to %6 step %c2 { loop.for %arg5 = %c0 to %8 step %c3 { loop.for %arg6 = %c0 to %7 step %c4 { - %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : memref to memref - %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : memref to memref - %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%11, %14, %17) : memref, memref, memref + %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : + memref to memref + %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : + memref to memref + %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : + memref to memref + linalg.matmul %11, %14, %17 : + (memref, + memref, + memref) } } } @@ -129,75 +142,11 @@ // CHECK: linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref, memref // CHECK: linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref, memref // -// CHECK: linalg.matmul(%[[fullA_f64]], %[[fullB_f64]], %[[fullC_f64]]) : memref, memref, memref +// CHECK: linalg.matmul %[[fullA_f64]], %[[fullB_f64]], %[[fullC_f64]] : +// CHECK-SAME: (memref, memref, memref) // // CHECK: linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : memref, memref // // CHECK: dealloc %[[tmpA_f64]] : memref<64xi8> // CHECK: dealloc %[[tmpB_f64]] : memref<96xi8> // CHECK: dealloc %[[tmpC_f64]] : memref<48xi8> - -// ----- - -func @matmul_i32(%A: memref, %M: index, %N: index, %K: index) { - %c4 = constant 4 : index - %c3 = constant 3 : index - %c2 = constant 2 : index - %c0 = constant 0 : index - %c1 = constant 1 : index - %3 = view %A[%c0][%M, %K] : memref to memref - %4 = view %A[%c0][%K, %N] : memref to memref - %5 = view %A[%c0][%M, %N] : memref to memref - %6 = dim %3, 0 : memref - %7 = dim %3, 1 : memref - %8 = dim %4, 1 : memref - loop.for %arg4 = %c0 to %6 step %c2 { - loop.for %arg5 = %c0 to %8 step %c3 { - loop.for %arg6 = %c0 to %7 step %c4 { - %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : memref to memref - %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : memref to memref - %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%11, %14, %17) : memref, memref, memref - } - } - } - return -} 
- -// CHECK-LABEL: func @matmul_i32(%{{.*}}: memref, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) { -// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { -// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { -// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { -// CHECK: %[[vA_i32:.*]] = subview {{.*}} : memref -// CHECK: %[[vB_i32:.*]] = subview {{.*}} : memref -// CHECK: %[[vC_i32:.*]] = subview {{.*}} : memref -/// -// CHECK: %[[tmpA_i32:.*]] = alloc() : memref<32xi8> -// CHECK: %[[fullA_i32:.*]] = std.view %[[tmpA_i32]][][{{.*}}] : memref<32xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref -// CHECK: %[[partialA_i32:.*]] = subview %[[fullA_i32]][%{{.*}}, %{{.*}}] : memref to memref -/// -// CHECK: %[[tmpB_i32:.*]] = alloc() : memref<48xi8> -// CHECK: %[[fullB_i32:.*]] = std.view %[[tmpB_i32]][][{{.*}}] : memref<48xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref -// CHECK: %[[partialB_i32:.*]] = subview %[[fullB_i32]][%{{.*}}, %{{.*}}] : memref to memref -/// -// CHECK: %[[tmpC_i32:.*]] = alloc() : memref<24xi8> -// CHECK: %[[fullC_i32:.*]] = std.view %[[tmpC_i32]][][{{.*}}] : memref<24xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref -// CHECK: %[[partialC_i32:.*]] = subview %[[fullC_i32]][%{{.*}}, %{{.*}}] : memref to memref - -// CHECK: linalg.fill(%[[fullA_i32]], {{.*}}) : memref, i32 -// CHECK: linalg.fill(%[[fullB_i32]], {{.*}}) : memref, i32 -// CHECK: linalg.fill(%[[fullC_i32]], {{.*}}) : memref, i32 -// CHECK: linalg.copy(%[[vA_i32]], %[[partialA_i32]]) : memref, memref -// CHECK: linalg.copy(%[[vB_i32]], %[[partialB_i32]]) : memref, memref -// CHECK: linalg.copy(%[[vC_i32]], %[[partialC_i32]]) : memref, memref -// -// CHECK: linalg.matmul(%[[fullA_i32]], %[[fullB_i32]], %[[fullC_i32]]) : memref, memref, memref -// -// CHECK: linalg.copy(%[[partialC_i32]], %[[vC_i32]]) : memref, memref -// -// CHECK: dealloc %[[tmpA_i32]] : memref<32xi8> -// CHECK: dealloc %[[tmpB_i32]] : memref<48xi8> -// CHECK: dealloc %[[tmpC_i32]] : memref<24xi8> diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir --- a/mlir/test/Dialect/Linalg/roundtrip.mlir +++ b/mlir/test/Dialect/Linalg/roundtrip.mlir @@ -86,9 +86,10 @@ %arg1: memref, %arg2: memref, %arg3: memref) { - linalg.matmul(%arg0, %arg0, %arg0) : memref, - memref, - memref + linalg.matmul %arg0, %arg0, %arg0 : + (memref, + memref, + memref) -> () linalg.matvec(%arg0, %arg1, %arg2) : memref, memref, memref @@ -98,10 +99,10 @@ return } // CHECK-LABEL: func @ops(% -// CHECK-NEXT: linalg.matmul(%{{.*}}, %{{.*}}, %{{.*}}) : -// CHECK-SAME: memref, -// CHECK-SAME: memref, -// CHECK-SAME: memref +// CHECK-NEXT: linalg.matmul %{{.*}}, %{{.*}}, %{{.*}} : +// CHECK-SAME: (memref, +// CHECK-SAME: memref, +// CHECK-SAME: memref) // CHECK-NEXT: linalg.matvec(%{{.*}}, %{{.*}}, %{{.*}}) : // CHECK-SAME: memref, // CHECK-SAME: memref, diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir --- a/mlir/test/Dialect/Linalg/tile.mlir +++ b/mlir/test/Dialect/Linalg/tile.mlir @@ -35,7 +35,7 @@ // REACTIVATE_ME_TILE-234-DAG: #[[stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)> func @matmul(%arg0: memref, %arg1: memref, %arg2: memref) { - linalg.matmul(%arg0, %arg1, %arg2) : memref, memref, memref + linalg.matmul %arg0, %arg1, %arg2 : (memref, memref, memref) return } // TILE-2-LABEL: func @matmul( @@ -52,7 +52,7 @@ // TILE-2: %[[szK:.*]] = affine.min 
#[[bound_map]](%[[C2]], %[[localK]], %[[I]]) // TILE-2: %[[N:.*]] = dim %{{.*}}, 1 : memref // TILE-2: %[[sCi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[szK]], %[[N]]] [%[[C1]], %[[C1]]] : memref to memref -// TILE-2: linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]]) : memref, memref, memref +// TILE-2: linalg.matmul %[[sAi]], %{{.*}}, %[[sCi]] : (memref, memref, memref) // TILE-02-LABEL: func @matmul( // TILE-02-DAG: %[[C0:.*]] = constant 0 : index @@ -68,7 +68,7 @@ // TILE-02: %[[localK:.*]] = dim %{{.*}}, 1 // TILE-02: %[[szK:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localK]], %[[J]]) // TILE-02: %[[sCj:.*]] = subview %{{.*}}[%[[C0]], %[[J]]] [%[[M]], %[[szK]]] [%[[C1]], %[[C1]]] : memref to memref -// TILE-02: linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref, memref, memref +// TILE-02: linalg.matmul %{{.*}}, %[[sBj]], %[[sCj]] : (memref, memref, memref) // TILE-002-LABEL: func @matmul( // TILE-002-DAG: %[[C0:.*]] = constant 0 : index @@ -84,7 +84,7 @@ // TILE-002: %[[szK:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localK]], %[[K]]) // TILE-002: %[[N:.*]] = dim %{{.*}}, 1 : memref // TILE-002: %[[sBj:.*]] = subview %{{.*}}[%[[K]], %[[C0]]] [%[[szK]], %[[N]]] [%[[C1]], %[[C1]]] : memref to memref -// TILE-002: linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref, memref, memref +// TILE-002: linalg.matmul %[[sAj]], %[[sBj]], %{{.*}} : (memref, memref, memref) // TILE-234-LABEL: func @matmul( // TILE-234-DAG: %[[C0:.*]] = constant 0 : index @@ -114,14 +114,14 @@ // TILE-234: %[[szN:.*]] = affine.min #[[bound_map_3]](%[[C3]], %[[localN]], %[[J]]) // TILE-234: %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [%[[C1]], %[[C1]]] : memref to memref // -// TILE-234: linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref, memref, memref +// TILE-234: linalg.matmul %[[sAik]], %[[sBkj]], %[[sCij]] : (memref, memref, memref) // When the buffer shapes are known at compile time, it is possible to avoid // the "min" in subview size computation. This test uses buffer sizes divisible // by respective tile sizes (M=10 divisble by 2, N=12 divisible by 2 and 3, // K=16 divisble by 2 and 4). 
func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1: memref<16x12xf32, offset: ?, strides: [?, 1]>, %arg2: memref<10x12xf32, offset: ?, strides: [?, 1]>) { - linalg.matmul(%arg0, %arg1, %arg2) : memref<10x16xf32, offset: ?, strides: [?, 1]>, memref<16x12xf32, offset: ?, strides: [?, 1]>, memref<10x12xf32, offset: ?, strides: [?, 1]> + linalg.matmul %arg0, %arg1, %arg2 : (memref<10x16xf32, offset: ?, strides: [?, 1]>, memref<16x12xf32, offset: ?, strides: [?, 1]>, memref<10x12xf32, offset: ?, strides: [?, 1]>) return } // TILE-2-LABEL: func @matmul_static( @@ -134,7 +134,7 @@ // TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[C2]], %[[K]]] [%[[C1]], %[[C1]]] : memref<10x16xf32, #[[strided2D]]> to memref // TILE-2: %[[N:.*]] = dim %{{.*}}, 1 : memref<10x12xf32, #[[strided2D]]> // TILE-2: %[[sCi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[C2]], %[[N]]] [%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref -// TILE-2: linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]]) +// TILE-2: linalg.matmul %[[sAi]], %{{.*}}, %[[sCi]] // TILE-02-LABEL: func @matmul_static( // TILE-02-DAG: %[[C0:.*]] = constant 0 : index @@ -148,7 +148,7 @@ // TILE-02: %[[M:.*]] = dim %{{.*}}, 0 : memref<10x12xf32, #[[strided2D]]> // TILE-02-NOT: affine.min // TILE-02: %[[sCj:.*]] = subview %{{.*}}[%[[C0]], %[[J]]] [%[[M]], %[[C2]]] [%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref -// TILE-02: linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<10x16xf32, #[[strided2D]]>, memref, memref +// TILE-02: linalg.matmul %{{.*}}, %[[sBj]], %[[sCj]] : (memref<10x16xf32, #[[strided2D]]>, memref, memref) // TILE-002-LABEL: func @matmul_static( // TILE-002-DAG: %[[C0:.*]] = constant 0 : index @@ -162,7 +162,7 @@ // TILE-002: %[[N:.*]] = dim %{{.*}}, 1 : memref<16x12xf32, #[[strided2D]]> // TILE-002-NOT: affine.min // TILE-002: %[[sBj:.*]] = subview %{{.*}}[%[[K]], %[[C0]]] [%[[C2]], %[[N]]] [%[[C1]], %[[C1]]] : memref<16x12xf32, #[[strided2D]]> to memref -// TILE-002: linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref, memref, memref<10x12xf32, #[[strided2D]]> +// TILE-002: linalg.matmul %[[sAj]], %[[sBj]], %{{.*}} : (memref, memref, memref<10x12xf32, #[[strided2D]]>) // TILE-234-LABEL: func @matmul_static( // TILE-234-DAG: %[[C0:.*]] = constant 0 : index @@ -183,7 +183,7 @@ // TILE-234-NOT: affine.min // TILE-234: %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[C2]], %[[C3]]] [%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref // -// TILE-234: linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref, memref, memref +// TILE-234: linalg.matmul %[[sAik]], %[[sBkj]], %[[sCij]] : (memref, memref, memref) func @matvec(%arg0: memref, %arg1: memref, %arg2: memref) { linalg.matvec(%arg0, %arg1, %arg2) : memref, memref, memref diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir --- a/mlir/test/Dialect/Linalg/transform-patterns.mlir +++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir @@ -50,9 +50,9 @@ func @matmul(%A: memref, %B: memref, %C: memref) { - linalg.matmul(%A, %B, %C) : memref, + linalg.matmul %A, %B, %C : (memref, memref, - memref + memref) return } // CHECK-LABEL: func @matmul @@ -81,7 +81,7 @@ // CHECK: loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2]] { // CHECK: loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] { // CHECK: loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] { -// CHECK: linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref, memref, memref +// CHECK: linalg.matmul {{.*}}, {{.*}}, {{.*}} : 
(memref, memref, memref) #matmul_trait = { args_in = 2, @@ -116,8 +116,8 @@ func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>, %C: memref<8x32xf32>) { - linalg.matmul(%A, %B, %C) { __internal_linalg_transform__ = "VECTORIZE"} : - memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> + linalg.matmul %A, %B, %C { __internal_linalg_transform__ = "VECTORIZE"} : + (memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32>) return } // CHECK-LABEL: func @vectorization_test_2 @@ -215,10 +215,10 @@ func @matmul_perm(%A: memref, %B: memref, %C: memref) { - linalg.matmul(%A, %B, %C) {__internal_linalg_transform__ = "__with_perm__"} : - memref, - memref, - memref + linalg.matmul %A, %B, %C {__internal_linalg_transform__ = "__with_perm__"} : + (memref, + memref, + memref) return } // CHECK-LABEL: func @matmul_perm @@ -241,7 +241,7 @@ // CHECK: loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c20]] { // CHECK: loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] { // CHECK: loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] { -// CHECK: linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref, memref, memref +// CHECK: linalg.matmul {{.*}}, {{.*}}, {{.*}} : (memref, memref, memref) func @promote_subview_matmul(%arg0: memref, %arg1: memref, @@ -263,10 +263,10 @@ memref to memref %5 = subview %arg2[%arg3, %arg4][%c2000, %c3000][%c1, %c1] : memref to memref - linalg.matmul(%3, %4, %5) {__internal_linalg_transform__ = "_promote_views_"} : - memref, - memref, - memref + linalg.matmul %3, %4, %5 {__internal_linalg_transform__ = "_promote_views_"} : + (memref, + memref, + memref) } } } @@ -291,7 +291,7 @@ // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref // CHECK: linalg.copy(%[[s1]], %[[l1]]) : memref, memref // CHECK: linalg.copy(%[[s2]], %[[l2]]) : memref, memref -// CHECK: linalg.matmul(%[[v0]], %[[v1]], %[[v2]]) : memref, memref, memref +// CHECK: linalg.matmul %[[v0]], %[[v1]], %[[v2]] : (memref, memref, memref) func @promote_first_subview_matmul(%arg0: memref, %arg1: memref, @@ -313,10 +313,10 @@ memref to memref %5 = std.subview %arg2[%arg3, %arg4][%c2000, %c3000][%c1, %c1] : memref to memref - linalg.matmul(%3, %4, %5) {__internal_linalg_transform__ = "_promote_first_view_"} : - memref, - memref, - memref + linalg.matmul %3, %4, %5 {__internal_linalg_transform__ = "_promote_first_view_"} : + (memref, + memref, + memref) } } } @@ -341,7 +341,7 @@ // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref // CHECK-NOT: linalg.copy(%[[s1]], %[[l1]]) : memref, memref // CHECK-NOT: linalg.copy(%[[s2]], %[[l2]]) : memref, memref^ -// CHECK: linalg.matmul(%[[v0]], %[[s1]], %[[s2]]) : memref, memref, memref +// CHECK: linalg.matmul %[[v0]], %[[s1]], %[[s2]] : (memref, memref, memref) func @aligned_promote_fill(%arg0: memref) { %c2000 = constant 2000 : index diff --git a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir --- a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir +++ b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir @@ -86,7 +86,7 @@ %B = view %bB[][%c16, %c10] : memref to memref %C = view %bC[][%c10, %c10] : memref to memref - linalg.matmul(%A, %B, %C) : memref, memref, memref + linalg.matmul %A, %B, %C : (memref, memref, memref) %res = load %C[%c6, %c7] : memref dealloc %bC : memref diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp --- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp +++ 
b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
@@ -1489,6 +1489,10 @@
                           TypeRange inputTypes, TypeRange outputTypes);
     static void regionBuilder(Block &block);
+
+    std::string getLibraryCallName() {{
+      return generateLibraryCallName(getOperation());
+    }
  }];
})FMT";
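
Note on the resulting assembly format (illustrative, not part of the patch): with MatmulOp now generated from LinalgNamedStructuredOpsSpec.tc, the op uses the common named-structured-op printer/parser touched above, so operands come first, an optional attribute dictionary may follow the operands, and the operand types are grouped in parentheses after " : ". The sketch below is a minimal restatement of that syntax; the function name @matmul_example and the plain f32 memref types are assumptions chosen for the example, not values taken from the tests in this patch.

// Minimal sketch of linalg.matmul assembly after this change (types are illustrative).
func @matmul_example(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
  // Old hand-written form, removed by this patch:
  //   linalg.matmul(%A, %B, %C) : memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
  // New generated form: operands, then parenthesized operand types.
  linalg.matmul %A, %B, %C : (memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>)
  // Attributes are written after the operands, as in the updated vectorization test.
  linalg.matmul %A, %B, %C {__internal_linalg_transform__ = "VECTORIZE"} :
    (memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>)
  return
}

Lowering to library calls keeps working because the generated op now carries getLibraryCallName() in its extraClassDeclaration (the mlir-linalg-ods-gen.cpp hunk above), which is what the LinalgToLLVM pattern list relies on.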