diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -117,7 +117,7 @@
 SmallVector<OpOperand *>
 AnalysisState::getAliasingOpOperand(OpResult result) const {
   if (Operation *op = result.getDefiningOp())
-    if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
+    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
       return bufferizableOp.getAliasingOpOperand(result, *this);
   return {};
 }
@@ -127,7 +127,7 @@
 SmallVector<OpResult>
 AnalysisState::getAliasingOpResult(OpOperand &opOperand) const {
   if (auto bufferizableOp =
-          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
+          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
     return bufferizableOp.getAliasingOpResult(opOperand, *this);
   return {};
 }
@@ -136,7 +136,7 @@
 /// op is not bufferizable.
 bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
   if (auto bufferizableOp =
-          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
+          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
     return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);
 
   // Unknown op that returns a tensor. The inplace analysis does not support it.
@@ -148,7 +148,7 @@
 /// `true` if the op is not bufferizable.
 bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
   if (auto bufferizableOp =
-          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
+          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
     return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);
 
   // Unknown op that returns a tensor. The inplace analysis does not support it.
@@ -160,7 +160,7 @@
 /// alias. Return false if the op is not bufferizable.
 bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
   if (auto bufferizableOp =
-          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
+          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
     return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);
 
   // Unknown op that returns a tensor. The inplace analysis does not support it.
diff --git a/mlir/test/Dialect/Linalg/bufferize.mlir b/mlir/test/Dialect/Linalg/bufferize.mlir
--- a/mlir/test/Dialect/Linalg/bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/bufferize.mlir
@@ -189,3 +189,31 @@
 // CHECK: %[[OUT_TENSOR:.*]] = bufferization.to_tensor %[[ALLOC]] : memref
 // CHECK: return %[[OUT_TENSOR]]
 }
+
+// -----
+
+// This is a regression test. The linalg-bufferize pass should ignore all func
+// dialect ops.
+
+// CHECK-LABEL: func private @csum(tensor<6xi64>) -> tensor<6xi64>
+func.func private @csum(%arg0: tensor<6xi64>) -> tensor<6xi64>
+
+// CHECK: func public @main(%[[arg0:.*]]: tensor<2x3xi1>)
+// CHECK: %[[collapse:.*]] = tensor.collapse_shape %[[arg0]]
+// CHECK: %[[collapse_m:.*]] = bufferization.to_memref %[[collapse]]
+// CHECK: %[[alloc:.*]] = memref.alloc()
+// CHECK: linalg.generic {{.*}} ins(%[[collapse_m]] : memref<6xi1>) outs(%[[alloc]] : memref<6xi64>)
+// CHECK: %[[generic_t:.*]] = bufferization.to_tensor %[[alloc]]
+// CHECK: %[[call:.*]] = call @csum(%[[generic_t]])
+// CHECK: return %[[call]]
+func.func public @main(%arg0: tensor<2x3xi1>) -> tensor<6xi64> {
+  %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<2x3xi1> into tensor<6xi1>
+  %1 = linalg.init_tensor [6] : tensor<6xi64>
+  %2 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%0 : tensor<6xi1>) outs(%1 : tensor<6xi64>) {
+  ^bb0(%arg1: i1, %arg2: i64):
+    %4 = arith.extui %arg1 : i1 to i64
+    linalg.yield %4 : i64
+  } -> tensor<6xi64>
+  %3 = func.call @csum(%2) : (tensor<6xi64>) -> tensor<6xi64>
+  return %3 : tensor<6xi64>
+}
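
Note: the change works because BufferizationOptions::dynCastBufferizableOp consults the options' op filter in addition to performing the interface cast, whereas a plain dyn_cast<BufferizableOpInterface> does not. Under linalg-bufferize, func dialect ops are not in the allowed set, so the analysis previously still treated them as bufferizable and crashed/misbehaved on the func.call in the test above. A minimal sketch of the assumed helper semantics follows; the exact in-tree filter check (here isOpAllowed) may differ:

    // Assumed sketch, not the verbatim in-tree implementation.
    BufferizableOpInterface
    BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
      auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
      if (!bufferizableOp)
        return nullptr;
      // Treat ops excluded by the options' op filter (e.g. all func dialect
      // ops under linalg-bufferize) as not bufferizable, so the analysis
      // skips them instead of querying their interface methods.
      if (!isOpAllowed(op))
        return nullptr;
      return bufferizableOp;
    }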