diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
@@ -397,13 +397,6 @@
              && !bufferizableOp.bufferizesToMemoryWrite(opOperand, state)
              && !bufferizableOp.getAliasingOpResult(opOperand, state).empty();
     }
-
-    // TODO: This attribute is deprecated. Use `bufferization.writable` or add
-    // a new attribute in a different dialect.
-    /// Attribute name used to mark region arguments that can be bufferized
-    /// in-place during one-shot bufferization.
-    constexpr const static ::llvm::StringLiteral
-        kInplaceableAttrName = "linalg.inplaceable";
   }];
 }
 
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -38,11 +38,6 @@
 using namespace mlir;
 using namespace bufferization;
 
-/// Attribute name used to mark region arguments that can be bufferized
-/// in-place during linalg comprehensive bufferization.
-constexpr const ::llvm::StringLiteral
-    bufferization::BufferizableOpInterface::kInplaceableAttrName;
-
 /// Return the owner of the given value.
 static Operation *getOwnerOfValue(Value value) {
   if (auto opResult = value.dyn_cast<OpResult>())
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -325,24 +325,6 @@
   return success();
 }
 
-/// Set the attribute that triggers inplace bufferization on a FuncOp argument
-/// `bbArg`.
-static void setInPlaceFuncArgument(BlockArgument bbArg, bool inPlace) {
-  auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
-  funcOp.setArgAttr(bbArg.getArgNumber(),
-                    BufferizableOpInterface::kInplaceableAttrName,
-                    BoolAttr::get(bbArg.getContext(), inPlace));
-}
-
-/// Annotate the IR with the result of the analysis. For testing/debugging only.
-static void annotateOpsWithBufferizationMarkers(func::FuncOp funcOp,
-                                                const AnalysisState &state) {
-  auto bufferizableOp = cast<BufferizableOpInterface>(funcOp.getOperation());
-  for (BlockArgument bbArg : funcOp.getArguments())
-    if (bbArg.getType().isa<TensorType>())
-      setInPlaceFuncArgument(bbArg, bufferizableOp.isWritable(bbArg, state));
-}
-
 /// Fold return values that are memref casts and update function return types.
 ///
 /// During FuncOp bufferization, the exact type of the returned memrefs (if any)
@@ -413,10 +395,6 @@
 
     // Mark op as fully analyzed.
     funcState.analyzedFuncOps[funcOp] = FuncOpAnalysisState::Analyzed;
-
-    // Add annotations to function arguments.
-    if (options.testAnalysisOnly)
-      annotateOpsWithBufferizationMarkers(funcOp, state);
   }
 
   return success();
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
@@ -125,19 +125,6 @@
 
 LogicalResult LinalgDialect::verifyOperationAttribute(Operation *op,
                                                       NamedAttribute attr) {
-  using bufferization::BufferizableOpInterface;
-
-  if (attr.getName() == BufferizableOpInterface::kInplaceableAttrName) {
-    if (!attr.getValue().isa<BoolAttr>()) {
-      return op->emitError()
-             << "'" << BufferizableOpInterface::kInplaceableAttrName
-             << "' is expected to be a boolean attribute";
-    }
-    if (!isa<FunctionOpInterface>(op))
-      return op->emitError() << "expected " << attr.getName()
-                             << " to be used on function-like operations";
-    return success();
-  }
   if (attr.getName() == LinalgDialect::kMemoizedIndexingMapsAttrName)
     return success();
   return op->emitError() << "attribute '" << attr.getName()
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
@@ -1220,8 +1220,8 @@
 
 // CHECK-LABEL: func @write_to_same_tensor_in_loop_out_of_place(
 func.func @write_to_same_tensor_in_loop_out_of_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
-    %B : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
+    %B : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index)
   -> (tensor<?xf32>)
 {
@@ -1248,7 +1248,7 @@
 
 // CHECK-LABEL: func @write_to_same_alloc_tensor_in_place(
 func.func @write_to_same_alloc_tensor_in_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index)
   -> (tensor<?xf32>)
 {
@@ -1276,7 +1276,7 @@
 
 // CHECK-LABEL: func @write_to_same_alloc_tensor_out_of_place(
 func.func @write_to_same_alloc_tensor_out_of_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index, %f: f32)
   -> (tensor<?xf32>)
 {
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
@@ -7,9 +7,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1234(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -34,9 +34,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1243(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -61,9 +61,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1324(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -88,9 +88,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1342(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -115,9 +115,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1423(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -142,9 +142,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1432(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -169,9 +169,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2134(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -196,9 +196,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2143(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -223,9 +223,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2314(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -250,9 +250,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2341(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -277,9 +277,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2413(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -304,9 +304,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2431(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -331,9 +331,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3124(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -358,9 +358,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3142(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
     -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -385,9 +385,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3214(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<256x256xf32>
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true}) -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
@@ -411,9 +411,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3241(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -438,9 +438,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3412(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -465,9 +465,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3421(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -492,9 +492,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4123(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -519,9 +519,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4132(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -546,9 +546,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4213(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -573,9 +573,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4231(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -600,9 +600,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4312(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -627,9 +627,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4321(
-  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+  %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+  %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
    -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
--- a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
@@ -577,7 +577,7 @@
 
 // CHECK-LABEL: func @write_to_same_tensor_in_loop_in_place(
 func.func @write_to_same_tensor_in_loop_in_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index)
   -> (tensor<?xf32>)
 {
diff --git a/mlir/test/Dialect/Transform/selective-targeting.mlir b/mlir/test/Dialect/Transform/selective-targeting.mlir
--- a/mlir/test/Dialect/Transform/selective-targeting.mlir
+++ b/mlir/test/Dialect/Transform/selective-targeting.mlir
@@ -3,7 +3,7 @@
 // CHECK-LABEL: func.func @matmul_tensors_1(
 func.func @matmul_tensors_1(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // This operation is marked for tiling only.
   // CHECK-COUNT-3: scf.for
@@ -19,7 +19,7 @@
 
 func.func @matmul_tensors_2(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // This operation is marked for tiling and vectorization.
@@ -37,7 +37,7 @@
 
 func.func @matmul_tensors_3(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // This operation is marked for vectorization only.
   // CHECK-NOT: scf.for
@@ -89,7 +89,7 @@
 // CHECK-LABEL: @vectorize_one
 func.func @vectorize_one(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // CHECK: vector.contract
   %0 = linalg.matmul {test.attrA}
@@ -101,7 +101,7 @@
 
 func.func @vectorize_none(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
    -> tensor<128x128xf32> {
   // CHECK: linalg.matmul
   %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
@@ -134,7 +134,7 @@
 // CHECK-LABEL: @vectorize_all
 func.func @vectorize_all(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
-  %arg3: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg3: tensor<128x128xf32>)
    -> tensor<128x128xf32> {
   // CHECK: vector.contract
   %0 = linalg.matmul {test.attrA}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
@@ -9,7 +9,7 @@
 
 #map0 = affine_map<(d0, d1)[s0] -> ((d1 - d0) ceildiv s0)>
 #map1 = affine_map<(d0, d1)[s0] -> ((d0 - d1) ceildiv s0)>
-func.func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f32> {linalg.inplaceable = true}) -> tensor<f32> {
+func.func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f32>) -> tensor<f32> {
   %c64 = arith.constant 64 : index
   %cst = arith.constant 0.000000e+00 : f32
   %c2 = arith.constant 2 : index
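
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): what the attribute
// migration looks like for IR that previously used the removed marker.
// A function argument that used to carry `linalg.inplaceable = true` is now
// annotated with the dialect-neutral `bufferization.writable = true`, which
// One-Shot Bufferize consults when deciding whether the argument's buffer may
// be written to in place. Function and value names here are hypothetical.
func.func @writable_arg_example(
    %t : tensor<16xf32> {bufferization.writable = true},
    %f : f32, %idx : index) -> tensor<16xf32> {
  // Because %t is writable, this insertion can bufferize in place instead of
  // forcing an allocation and copy of a new buffer.
  %0 = tensor.insert %f into %t[%idx] : tensor<16xf32>
  return %0 : tensor<16xf32>
}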