diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -95,6 +95,7 @@
     PredOpTrait<"input and output have same element type", TCopVTEtIsSameAs<0, 1>>,
     DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>,
     DeclareOpInterfaceMethods<AggregatedOpInterface, ["decomposeOperation"]>,
+    DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
     DeclareOpInterfaceMethods<TilingInterface,
      ["getIterationDomain",
       "getLoopIteratorTypes",
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ ... @@
+void SoftmaxOp::getEffects(
+    SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
+        &effects) {
+  getGenericEffectsImpl(effects, getOperation()->getResults(),
+                        getDpsInputOperands(), getDpsInitOperands());
+}
+
 // Helper functions for softmax decomposition.
 // @{
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -897,3 +897,14 @@
 // CHECK-SAME:   iterator_types = ["parallel"]
 // CHECK-SAME:   } ins(%[[ARG1]] : tensor<5xf32>)
 // CHECK-SAME:   outs(%[[ARG2]] : memref<5xf32>) {
+
+// -----
+
+// CHECK-LABEL: dead_softmax
+func.func @dead_softmax(%arg0: tensor<16x64x256xf32>) -> tensor<16x64x256xf32> {
+  %0 = tensor.empty() : tensor<16x64x256xf32>
+  // CHECK-NOT: linalg.softmax
+  %1 = linalg.softmax dimension(1)
+    ins(%arg0 : tensor<16x64x256xf32>) outs(%0 : tensor<16x64x256xf32>) -> tensor<16x64x256xf32>
+  return %arg0 : tensor<16x64x256xf32>
+}