diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
@@ -36,9 +36,6 @@
 class ConvOp;
 class LinalgOp;
-class PoolingMaxOp;
-class PoolingMinOp;
-class PoolingSumOp;
 
 // TODO: allow an extra ValueRange to specify an indexing and allow
 // non-hyperrectangular shapes.
 
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -417,89 +417,6 @@
   let hasFolder = 1;
 }
 
-// Only support buffer semantics.
-class SingleInputPoolingBase_Op
-    : PoolingBase_Op {
-  let description = [{
-    A base class for single input pooling function.
-
-    TODO: Figure out a better way to handle window dimensions, i.e., eliminate
-    the fake memref.
-    The window dimensions are specified by argument `windowDims`. The i-th
-    dimension in the shape of `windowDims` denotes the size of the window along
-    dimension i. For example, if the window size is 2x3, then a memref<2x3>
-    should be passed to the operation as `windowDims`.
-  }];
-
-  let arguments = (ins AnyStridedMemRef:$input,
-                   AnyStridedMemRef:$windowDims,
-                   AnyStridedMemRef:$output,
-                   OptionalAttr:$strides,
-                   OptionalAttr:$dilations,
-                   OptionalAttr:$padding);
-
-  let extraClassDeclaration = commonUtils# [{
-    ValueRange inputs() { return getOperands().slice(0, 2); }
-    ValueRange outputs() { return getOperands().take_back(); }
-
-    ArrayAttr iterator_types() {
-      // Outer parallel loops are always the number of output dimensions.
-      int64_t nPar = getRank(getOutputOperand(0));
-      // The window loops has the same number loops with output dimensions.
-      unsigned nWin = nPar;
-      SmallVector iters(nPar, getParallelIteratorTypeName());
-      iters.reserve(nPar + nWin);
-      iters.append(nWin, getWindowIteratorTypeName());
-      return Builder(getContext()).getStrArrayAttr(iters);
-    }
-
-    ArrayAttr indexing_maps() {
-      MLIRContext *context = getContext();
-      auto nPar = getNumParallelLoops();
-      auto nWin = getNumWindowLoops();
-      assert(nWin > 0 && "expected at least one window dimension");
-      unsigned idx = 0;
-      auto outputDims = makeAffineDimExprs(nPar, idx, context);
-      auto windowDims = makeAffineDimExprs(nWin, idx, context);
-      // Construct the weighedSum expression.
-      auto inputDims =
-          weightedPoolingInputIndex(*this, outputDims, windowDims);
-      return Builder(getContext()).getAffineMapArrayAttr({
-          // input
-          AffineMap::get(idx, 0, inputDims, context),
-          // windowDims
-          AffineMap::get(idx, 0, windowDims, context),
-          // output
-          AffineMap::get(idx, 0, outputDims, context)});
-    }
-  }];
-
-  let verifier = [{ return ::verify(*this); }];
-
-  let hasFolder = 1;
-}
-
-def PoolingMaxOp: SingleInputPoolingBase_Op<"pooling_max"> {
-  let description = [{
-    Takes max op as pooling operation, i.e., it samples the maximum value in the
-    window.
-  }];
-}
-
-def PoolingMinOp: SingleInputPoolingBase_Op<"pooling_min"> {
-  let description = [{
-    Takes min op as pooling operation, i.e., it samples the minimum value in the
-    window.
-  }];
-}
-
-def PoolingSumOp: SingleInputPoolingBase_Op<"pooling_sum"> {
-  let description = [{
-    Takes add op as pooling operation, i.e., it accumulates the values in the
-    window.
-  }];
-}
-
 //===----------------------------------------------------------------------===//
 // Generic Linalg ops.
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -2638,55 +2638,6 @@
   return success();
 }
 
-template
-static LogicalResult verifySingleInputPoolingOp(PoolingOp op) {
-  auto inputType = op.input().getType().template cast();
-  auto outputType = op.output().getType().template cast();
-  if (outputType.getElementType() != inputType.getElementType())
-    return op.emitOpError("expects memref elemental types to match");
-
-  auto windowDimsType = op.windowDims().getType().template cast();
-  if (outputType.getRank() != inputType.getRank() ||
-      outputType.getRank() != windowDimsType.getRank())
-    return op.emitOpError("expects memref ranks to match");
-
-  if (auto strides = op.strides()) {
-    if (failed(verifyStrideOrDilation(op, strides->getValue(),
-                                      /*isStride=*/true)))
-      return failure();
-  }
-  if (auto dilations = op.dilations()) {
-    if (failed(verifyStrideOrDilation(op, dilations->getValue(),
-                                      /*isStride=*/false)))
-      return failure();
-  }
-  return success();
-}
-
-#define DEFINE_POOLING_OP_GET_EFFECTS(OP_NAME)                                \
-  void OP_NAME::getEffects(                                                   \
-      SmallVectorImpl>                                                        \
-          &effects) {                                                         \
-    effects.emplace_back(MemoryEffects::Read::get(), input(),                 \
-                         SideEffects::DefaultResource::get());                \
-    effects.emplace_back(MemoryEffects::Write::get(), output(),               \
-                         SideEffects::DefaultResource::get());                \
-  }
-
-static LogicalResult verify(PoolingMaxOp op) {
-  return verifySingleInputPoolingOp(op);
-}
-static LogicalResult verify(PoolingMinOp op) {
-  return verifySingleInputPoolingOp(op);
-}
-static LogicalResult verify(PoolingSumOp op) {
-  return verifySingleInputPoolingOp(op);
-}
-
-DEFINE_POOLING_OP_GET_EFFECTS(PoolingMaxOp)
-DEFINE_POOLING_OP_GET_EFFECTS(PoolingMinOp)
-DEFINE_POOLING_OP_GET_EFFECTS(PoolingSumOp)
-
 #include "mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yamlgen.cpp.inc"
 
 #define GET_OP_CLASSES
@@ -2756,9 +2707,6 @@
                      ArrayRef windowDims);
 
 INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(ConvOp)
-INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(PoolingMaxOp)
-INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(PoolingMinOp)
-INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(PoolingSumOp)
 
 SmallVector mlir::linalg::concat(ArrayRef a,
                                  ArrayRef b) {
@@ -3215,9 +3163,6 @@
 }
 
 LINALGOP_FOLDERS(ConvOp)
-LINALGOP_FOLDERS(PoolingMaxOp)
-LINALGOP_FOLDERS(PoolingMinOp)
-LINALGOP_FOLDERS(PoolingSumOp)
 LINALGOP_FOLDERS(CopyOp)
 LINALGOP_FOLDERS(FillOp)
 LINALGOP_FOLDERS(GenericOp)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -209,53 +209,12 @@
 namespace {
 
 /// The padding value for a given Op depends on the semantics of the Op.
-/// The identity value for ConvOp and PoolingSumOp is 0, for PoolingMaxOp is
-/// -inf or minInt and for PoolingMinOp is inf or maxInt.
+/// The identity value for ConvOp is 0.
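For context on the identity values mentioned above: max-pooling pads with -inf (or the signed-integer minimum), min-pooling pads with +inf (or the signed-integer maximum), and sum-pooling, like convolution, pads with 0. Below is a minimal standalone C++ sketch of that mapping using std::numeric_limits instead of MLIR attributes; it is illustrative only and not part of this patch.

    #include <limits>

    // Identity ("padding") element for each pooling reduction. T is assumed to
    // be a built-in arithmetic type such as float or int32_t.
    template <typename T>
    struct PoolingIdentity {
      // Max-pooling starts from the smallest representable value: -inf for
      // floating point, the signed minimum for integers.
      static T forMax() {
        return std::numeric_limits<T>::has_infinity
                   ? -std::numeric_limits<T>::infinity()
                   : std::numeric_limits<T>::lowest();
      }
      // Min-pooling starts from the largest representable value: +inf or max.
      static T forMin() {
        return std::numeric_limits<T>::has_infinity
                   ? std::numeric_limits<T>::infinity()
                   : std::numeric_limits<T>::max();
      }
      // Sum-pooling (and convolution) starts from the additive identity.
      static T forSum() { return T(0); }
    };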
 template Attribute getPadValueAttr(Type type) {
   llvm_unreachable("Unexpected op type for getPadValueAttr");
   return {};
 }
 
-template <> Attribute getPadValueAttr(Type type) {
-  if (auto floatType = type.dyn_cast()) {
-    return OpBuilder(type.getContext())
-        .getFloatAttr(floatType, APFloat::getInf(floatType.getFloatSemantics(),
-                                                 /*Negative*/ true));
-  }
-  if (auto intType = type.dyn_cast()) {
-    unsigned width = intType.getWidth();
-    // The select instruction used to lower the PoolingMin uses a signed
-    // comparison, use a signed constant irrespective of the signedness of the
-    // integer type.
-    return OpBuilder(type.getContext())
-        .getIntegerAttr(intType, APInt::getSignedMinValue(width));
-  }
-  llvm_unreachable("Unsupported data type for PoolingMaxOp");
-  return {};
-}
-
-template <> Attribute getPadValueAttr(Type type) {
-  if (auto floatType = type.dyn_cast()) {
-    return OpBuilder(type.getContext())
-        .getFloatAttr(floatType,
-                      APFloat::getInf(floatType.getFloatSemantics()));
-  }
-  if (auto intType = type.dyn_cast()) {
-    unsigned width = intType.getWidth();
-    // The select instruction used to lower the PoolingMin uses a signed
-    // comparison, use a signed constant irrespective of the signedness of the
-    // integer type.
-    return OpBuilder(type.getContext())
-        .getIntegerAttr(intType, APInt::getSignedMaxValue(width));
-  }
-  llvm_unreachable("Unsupported data type for PoolingMinOp");
-  return {};
-}
-
-template <> Attribute getPadValueAttr(Type type) {
-  return OpBuilder(type.getContext()).getZeroAttr(type);
-}
-
 template <> Attribute getPadValueAttr(Type type) {
   return OpBuilder(type.getContext()).getZeroAttr(type);
 }
@@ -311,72 +270,6 @@
   }
 }
 
-template static bool hasPadding(PoolingOp poolingOp) {
-  for (unsigned i = 0, e = poolingOp.getNumWindowLoops(); i < e; ++i) {
-    if (poolingOp.getLowPad(i) > 0 || poolingOp.getHighPad(i) > 0)
-      return true;
-  }
-  return false;
-}
-
-template
-static Value getPoolingInput(OpBuilder &b, Location loc, PoolingOp op,
-                             ArrayRef inputIndices) {
-  if (hasPadding(op)) {
-    Type type =
-        op.input().getType().template cast().getElementType();
-    Value padValue =
-        b.create(loc, type, getPadValueAttr(type));
-    return getPaddedInput(b, loc, op.input(), inputIndices,
-                          /*Pad every dimension*/ {}, padValue);
-  }
-  return b.create(loc, op.input(), inputIndices);
-}
-
-template
-void emitPoolingMinMaxScalarImplementation(OpBuilder &b, Location loc,
-                                           ArrayRef allIvs, OpType op) {
-  InputAndOutputIndices indices = getInputAndOutputIndices(b, loc, allIvs, op);
-  Value lhs = b.create(loc, op.output(), indices.outputs);
-  Value rhs = getPoolingInput(b, loc, op, indices.inputs);
-  Value value = llvm::TypeSwitch(op)
-                    .Case([&](PoolingMinOp poolingOp) {
-                      return ArithBuilder(b, loc).select(
-                          ArithBuilder(b, loc).slt(lhs, rhs), lhs, rhs);
-                    })
-                    .Case([&](PoolingMaxOp poolingOp) {
-                      return ArithBuilder(b, loc).select(
-                          ArithBuilder(b, loc).sgt(lhs, rhs), lhs, rhs);
-                    })
-                    .Default([&](auto) { return Value(); });
-  b.create(loc, value, op.output(), indices.outputs);
-}
-
-template
-static void emitScalarImplementation(OpBuilder &b, Location loc,
-                                     ArrayRef allIvs, PoolingMaxOp op) {
-  emitPoolingMinMaxScalarImplementation(
-      b, loc, allIvs, op);
-}
-
-template
-static void emitScalarImplementation(OpBuilder &b, Location loc,
-                                     ArrayRef allIvs, PoolingMinOp op) {
-  emitPoolingMinMaxScalarImplementation(
-      b, loc, allIvs, op);
-}
-
-template
-static void emitScalarImplementation(OpBuilder &b, Location loc,
-                                     ArrayRef allIvs, PoolingSumOp op) {
-  auto indices =
-      getInputAndOutputIndices(b, loc, allIvs, op);
-  Value inputVal =
-      getPoolingInput(b, loc, op, indices.inputs);
-  Value outputVal = b.create(loc, op.output(), indices.outputs);
-  Value added = ArithBuilder(b, loc).add(outputVal, inputVal);
-  b.create(loc, added, op.output(), indices.outputs);
-}
-
 /// Replace the index operations in the body of the loop nest by the matching
 /// induction variables.
 static void replaceIndexOpsByInductionVariables(LinalgOp linalgOp,
@@ -436,11 +329,9 @@
             "expect operands are captured and not passed by loop argument");
         allIvs.append(ivs.begin(), ivs.end());
         llvm::TypeSwitch(linalgOp)
-            .Case(
-                [&](auto op) {
-                  emitScalarImplementation(b, loc, allIvs,
-                                           op);
-                })
+            .Case([&](auto op) {
+              emitScalarImplementation(b, loc, allIvs, op);
+            })
             .Default([&](Operation *op) { assert(false && "unexpected op"); });
         return scf::ValueVector{};
       });
diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir
--- a/mlir/test/Dialect/Linalg/affine.mlir
+++ b/mlir/test/Dialect/Linalg/affine.mlir
@@ -126,26 +126,3 @@
 // CHECK: %[[res:.*]] = addf %[[vc]], %[[inc]] : f32
 // CHECK: affine.store %[[res]], %[[mC]][%[[b]], %[[m]], %[[n]]] : memref
 
-// CHECK-LABEL: func @pooling_max_min
-func @pooling_max_min(%arg0: memref,
-                      %arg1: memref,
-                      %arg2: memref) {
-  linalg.pooling_max(%arg0, %arg1, %arg2) { strides = [2, 1] }:
-    memref, memref, memref
-  linalg.pooling_min(%arg0, %arg1, %arg2) { strides = [2, 1] }:
-    memref, memref, memref
-  return
-}
-// This is a basic check to make sure the right load/stores are used. loops.mlir
-// checks for the rest.
-// CHECK: affine.load
-// CHECK-NEXT: affine.load
-// CHECK-NEXT: cmpf
-// CHECK-NEXT: select
-// CHECK-NEXT: affine.store
-// The min pooling body.
-// CHECK: affine.load
-// CHECK-NEXT: affine.load
-// CHECK-NEXT: cmpf
-// CHECK-NEXT: select
-// CHECK-NEXT: affine.store
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -308,17 +308,6 @@
 
 // -----
 
-func @pooling_rank_mismatch(%arg0: memref,
-                            %arg1: memref<2x3xf32>,
-                            %arg2: memref) {
-  // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #1 (3)}}
-  linalg.pooling_max(%arg0, %arg1, %arg2) {strides = [2, 1, 2]}:
-    memref, memref<2x3xf32>, memref
-  return
-}
-
-// -----
-
 func @named_ops(%a3: memref, %b3: memref, %c3: memref) {
   // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #1 (3)}}
   linalg.batch_matmul ins(%a3, %b3: memref, memref)
diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -14,8 +14,6 @@
 // CHECK-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
 // CHECK-DAG: #[[$stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
 // CHECK-DAG: #[[$stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
-// CHECK-DAG: #[[$stride1Dilation1Padding1:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 1)>
-// CHECK-DAG: #[[$stride1Dilation1Padding2:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 2)>
 
 // CHECKPARALLEL-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 // CHECKPARALLEL-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
@@ -27,8 +25,6 @@
 // CHECKPARALLEL-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
 // CHECKPARALLEL-DAG:
#[[$stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)> // CHECKPARALLEL-DAG: #[[$stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)> -// CHECKPARALLEL-DAG: #[[$stride1Dilation1Padding1:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 1)> -// CHECKPARALLEL-DAG: #[[$stride1Dilation1Padding2:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 2)> func @matmul(%arg0: memref, %M: index, %N: index, %K: index) { %c0 = constant 0 : index @@ -426,404 +422,6 @@ // CHECKPARALLEL: addf // CHECKPARALLEL: store %{{.*}}, {{.*}} : memref -func @pooling_max(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_max(%arg0, %arg1, %arg2) { strides = [2, 1] }: - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_max -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride2Dilation1]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1]] -// CHECK: memref.load {{.*}} : memref -// CHECK: memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref -// CHECK: %[[RES:.*]] = select %{{.*}}, -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_max -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride2Dilation1]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1]] -// CHECKPARALLEL: memref.load {{.*}} : memref -// CHECKPARALLEL: memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref -// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_max_padding(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_max(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } : - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_max_padding -// CHECK: %[[PAD:.*]] = constant 0xFF800000 : f32 -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECK: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECK: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECK: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECK: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECK: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32 -// CHECK: %[[CMP:.*]] = cmpf ogt, %[[RHS]], %[[SEL]] : f32 -// CHECK: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32 -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func 
@pooling_max_padding -// CHECKPARALLEL: %[[PAD:.*]] = constant 0xFF800000 : f32 -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECKPARALLEL: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECKPARALLEL: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECKPARALLEL: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32 -// CHECKPARALLEL: %[[CMP:.*]] = cmpf ogt, %[[RHS]], %[[SEL]] : f32 -// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32 -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_max_padding_i32(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_max(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } : - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_max_padding_i32 -// CHECK: %[[PAD:.*]] = constant -2147483648 : i32 -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECK: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECK: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECK: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECK: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECK: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32 -// CHECK: %[[CMP:.*]] = cmpi sgt, %[[RHS]], %[[SEL]] : i32 -// CHECK: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32 -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_max_padding_i32 -// CHECKPARALLEL: %[[PAD:.*]] = constant -2147483648 : i32 -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECKPARALLEL: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECKPARALLEL: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECKPARALLEL: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32 -// CHECKPARALLEL: 
%[[CMP:.*]] = cmpi sgt, %[[RHS]], %[[SEL]] : i32 -// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32 -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_min(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_min(%arg0, %arg1, %arg2) { strides = [2, 1] }: - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_min -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride2Dilation1]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1]] -// CHECK: memref.load {{.*}} : memref -// CHECK: memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref -// CHECK: %[[RES:.*]] = select %{{.*}}, -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_min -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride2Dilation1]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1]] -// CHECKPARALLEL: memref.load {{.*}} : memref -// CHECKPARALLEL: memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref -// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_min_padding(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_min(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } : - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_min_padding -// CHECK: %[[PAD:.*]] = constant 0x7F800000 : f32 -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECK: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECK: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECK: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECK: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECK: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32 -// CHECK: %[[CMP:.*]] = cmpf olt, %[[RHS]], %[[SEL]] : f32 -// CHECK: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32 -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_min_padding -// CHECKPARALLEL: %[[PAD:.*]] = constant 0x7F800000 : f32 -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} 
to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECKPARALLEL: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECKPARALLEL: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECKPARALLEL: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32 -// CHECKPARALLEL: %[[CMP:.*]] = cmpf olt, %[[RHS]], %[[SEL]] : f32 -// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32 -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_min_padding_i32(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_min(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } : - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_min_padding_i32 -// CHECK: %[[PAD:.*]] = constant 2147483647 : i32 -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECK: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECK: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECK: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECK: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECK: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32 -// CHECK: %[[CMP:.*]] = cmpi slt, %[[RHS]], %[[SEL]] : i32 -// CHECK: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32 -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_min_padding_i32 -// CHECKPARALLEL: %[[PAD:.*]] = constant 2147483647 : i32 -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECKPARALLEL: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECKPARALLEL: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECKPARALLEL: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32 -// CHECKPARALLEL: %[[CMP:.*]] = cmpi slt, %[[RHS]], %[[SEL]] : i32 -// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32 -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_sum(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_sum(%arg0, %arg1, %arg2) { strides = [2, 1] }: - memref, memref, memref - return -} -// CHECK-LABEL: func 
@pooling_sum -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride2Dilation1]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1]] -// CHECK: %[[RHS:.*]] = memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref -// CHECK: %[[LHS:.*]] = memref.load {{.*}} : memref -// CHECK: %[[RES:.*]] = addf %[[LHS]], %[[RHS]] : f32 -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_sum -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride2Dilation1]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1]] -// CHECKPARALLEL: %[[RHS:.*]] = memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref -// CHECKPARALLEL: %[[LHS:.*]] = memref.load {{.*}} : memref -// CHECKPARALLEL: %[[RES:.*]] = addf %[[LHS]], %[[RHS]] : f32 -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_sum_padding(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_sum(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } : - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_sum_padding -// CHECK: %[[PAD:.*]] = constant 0.000000e+00 : f32 -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECK: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECK: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECK: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECK: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32 -// CHECK: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECK: %[[RES:.*]] = addf %[[RHS]], %[[SEL]] : f32 -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_sum_padding -// CHECKPARALLEL: %[[PAD:.*]] = constant 0.000000e+00 : f32 -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECKPARALLEL: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// 
CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECKPARALLEL: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32 -// CHECKPARALLEL: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECKPARALLEL: %[[RES:.*]] = addf %[[RHS]], %[[SEL]] : f32 -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - -func @pooling_sum_padding_i32(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_sum(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } : - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_sum_padding_i32 -// CHECK: %[[PAD:.*]] = constant 0 : i32 -// CHECK: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECK: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECK: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECK: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECK: scf.for {{.*}} to %[[OX]] -// CHECK: scf.for {{.*}} to %[[OY]] -// CHECK: scf.for {{.*}} to %[[WX]] -// CHECK: scf.for {{.*}} to %[[WY]] -// CHECK: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECK: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECK: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECK: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECK: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECK: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32 -// CHECK: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECK: %[[RES:.*]] = addi %[[RHS]], %[[SEL]] : i32 -// CHECK: store %[[RES]], {{.*}} : memref - -// CHECKPARALLEL-LABEL: func @pooling_sum_padding_i32 -// CHECKPARALLEL: %[[PAD:.*]] = constant 0 : i32 -// CHECKPARALLEL: %[[WX:.*]] = memref.dim %arg1, %c0 : memref -// CHECKPARALLEL: %[[WY:.*]] = memref.dim %arg1, %c1 : memref -// CHECKPARALLEL: %[[OX:.*]] = memref.dim %arg2, %c0 : memref -// CHECKPARALLEL: %[[OY:.*]] = memref.dim %arg2, %c1 : memref -// CHECKPARALLEL: scf.parallel {{.*}} to (%[[OX]], %[[OY]]) -// CHECKPARALLEL: scf.for {{.*}} to %[[WX]] -// CHECKPARALLEL: scf.for {{.*}} to %[[WY]] -// CHECKPARALLEL: %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]] -// CHECKPARALLEL: %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]] -// CHECKPARALLEL: %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]]) -// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]]) -// CHECKPARALLEL: %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref -// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32 -// CHECKPARALLEL: %[[RHS:.*]] = memref.load {{.*}} : memref -// CHECKPARALLEL: %[[RES:.*]] = addi %[[RHS]], %[[SEL]] : i32 -// CHECKPARALLEL: store %[[RES]], {{.*}} : memref - #accesses = [ affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (i, j, k)>, diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir --- a/mlir/test/Dialect/Linalg/roundtrip.mlir +++ b/mlir/test/Dialect/Linalg/roundtrip.mlir @@ -269,48 +269,6 @@ // ----- -func @pooling_max(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_max(%arg0, %arg1, %arg2) {strides = [2, 1, 2]}: - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_max -// CHECK: linalg.pooling_max(%{{.*}}, %{{.*}}, %{{.*}}) -// CHECK-SAME: {strides = [2, 1, 2]} -// CHECK-SAME: memref, memref, memref - -// ----- - -func @pooling_min(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_min(%arg0, %arg1, %arg2) {strides = [2, 1, 
2]}: - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_min -// CHECK: linalg.pooling_min(%{{.*}}, %{{.*}}, %{{.*}}) -// CHECK-SAME: {strides = [2, 1, 2]} -// CHECK-SAME: memref, memref, memref - -// ----- - -func @pooling_sum(%arg0: memref, - %arg1: memref, - %arg2: memref) { - linalg.pooling_sum(%arg0, %arg1, %arg2) {strides = [2, 1, 2]}: - memref, memref, memref - return -} -// CHECK-LABEL: func @pooling_sum -// CHECK: linalg.pooling_sum(%{{.*}}, %{{.*}}, %{{.*}}) -// CHECK-SAME: {strides = [2, 1, 2]} -// CHECK-SAME: memref, memref, memref - -// ----- - #accesses_0 = [ affine_map<(i, j, k) -> (j, i)>, affine_map<(i, j, k) -> ()>,