diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -203,7 +203,7 @@
 //===----------------------------------------------------------------------===//
 
 def MemRef_BufferCastOp : MemRef_Op<"buffer_cast",
-    [SameOperandsAndResultShape, SameOperandsAndResultElementType,
+    [SameOperandsAndResultShape, SameOperandsAndResultElementType, NoSideEffect,
     TypesMatchWith<"type of 'tensor' is the tensor equivalent of 'memref'",
                    "memref", "tensor",
                    "getTensorTypeFromMemRefType($_self)">]> {
@@ -218,6 +218,10 @@
 
     Note, that mutating the result of the buffer cast operation leads to
    undefined behavior.
+
+    This operation is a specialized variant of the built-in
+    unrealized_conversion_cast and is intended for use in the context of
+    gradual bufferization.
   }];
 
   let arguments = (ins AnyTensor:$tensor);
diff --git a/mlir/test/Dialect/Linalg/bufferize.mlir b/mlir/test/Dialect/Linalg/bufferize.mlir
--- a/mlir/test/Dialect/Linalg/bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/bufferize.mlir
@@ -172,17 +172,16 @@
   // CHECK: %[[IDX:.*]] = call @make_index() : () -> index
   %i0 = call @make_index() : () -> index
 
-  // CHECK: %[[M0:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
+  // CHECK: %[[M:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
   // CHECK-NEXT: %[[A0:.*]] = memref.alloc() : memref<2x3xf32>
-  // CHECK-NEXT: %[[SM0:.*]] = memref.subview %[[M0]][0, 0] [2, 3] [1, 1]
+  // CHECK-NEXT: %[[SM0:.*]] = memref.subview %[[M]][0, 0] [2, 3] [1, 1]
   // CHECK-SAME: memref<?x?xf32> to memref<2x3xf32, #[[$MAP0]]>
   // CHECK-NEXT: linalg.copy(%[[SM0]], %[[A0]]) : memref<2x3xf32, #[[$MAP0]]>, memref<2x3xf32>
   // CHECK-NEXT: %[[RT0:.*]] = memref.tensor_load %[[A0]] : memref<2x3xf32>
   %st0 = subtensor %t[0, 0][2, 3][1, 1] : tensor<?x?xf32> to tensor<2x3xf32>
 
-  // CHECK: %[[M1:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
   // CHECK-NEXT: %[[A1:.*]] = memref.alloc(%[[IDX]]) : memref<2x?xf32>
-  // CHECK-NEXT: %[[SM1:.*]] = memref.subview %[[M1]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
+  // CHECK-NEXT: %[[SM1:.*]] = memref.subview %[[M]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
   // CHECK-SAME: memref<?x?xf32> to memref<2x?xf32, #[[$MAP1]]>
   // CHECK-NEXT: linalg.copy(%[[SM1]], %[[A1]]) : memref<2x?xf32, #[[$MAP1]]>, memref<2x?xf32>
   // CHECK-NEXT: %[[RT1:.*]] = memref.tensor_load %[[A1]] : memref<2x?xf32>
@@ -213,26 +212,25 @@
   // CHECK: %[[IDX:.*]] = call @make_index() : () -> index
 
-  // CHECK-DAG: %[[M0:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
+  // CHECK-DAG: %[[M:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
   // CHECK-DAG: %[[SM0:.*]] = memref.buffer_cast %[[ST0]] : memref<2x3xf32>
   // CHECK-NEXT: %[[DIM0:.*]] = memref.dim %[[T]], %[[C0]] : tensor<?x?xf32>
   // CHECK-NEXT: %[[DIM1:.*]] = memref.dim %[[T]], %[[C1]] : tensor<?x?xf32>
-  // CHECK-NEXT: %[[M0_COPY:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
-  // CHECK-NEXT: linalg.copy(%[[M0]], %[[M0_COPY]]) : memref<?x?xf32>, memref<?x?xf32>
-  // CHECK-NEXT: %[[SUBVIEW0:.*]] = memref.subview %[[M0_COPY]][0, 0] [2, 3] [1, 1]
+  // CHECK-NEXT: %[[M_COPY0:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
+  // CHECK-NEXT: linalg.copy(%[[M]], %[[M_COPY0]]) : memref<?x?xf32>, memref<?x?xf32>
+  // CHECK-NEXT: %[[SUBVIEW0:.*]] = memref.subview %[[M_COPY0]][0, 0] [2, 3] [1, 1]
   // CHECK-SAME: memref<?x?xf32> to memref<2x3xf32, #[[$MAP0]]>
   // CHECK-NEXT: linalg.copy(%[[SM0]], %[[SUBVIEW0]]) : memref<2x3xf32>, memref<2x3xf32, #[[$MAP0]]>
-  // CHECK-NEXT: %[[RT0:.*]] = memref.tensor_load %[[M0_COPY]] : memref<?x?xf32>
+  // CHECK-NEXT: %[[RT0:.*]] = memref.tensor_load %[[M_COPY0]] : memref<?x?xf32>
   %t0 = subtensor_insert %st0 into %t[0, 0][2, 3][1, 1] : tensor<2x3xf32> into tensor<?x?xf32>
 
-  // CHECK-DAG: %[[M1:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
   // CHECK-DAG: %[[SM1:.*]] = memref.buffer_cast %[[ST1]] : memref<2x?xf32>
-  // CHECK-NEXT: %[[M1_COPY:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
-  // CHECK-NEXT: linalg.copy(%[[M1]], %[[M1_COPY]]) : memref<?x?xf32>, memref<?x?xf32>
-  // CHECK-NEXT: %[[SUBVIEW1:.*]] = memref.subview %[[M1_COPY]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
+  // CHECK-NEXT: %[[M_COPY1:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
+  // CHECK-NEXT: linalg.copy(%[[M]], %[[M_COPY1]]) : memref<?x?xf32>, memref<?x?xf32>
+  // CHECK-NEXT: %[[SUBVIEW1:.*]] = memref.subview %[[M_COPY1]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
   // CHECK-SAME: memref<?x?xf32> to memref<2x?xf32, #[[$MAP1]]>
   // CHECK-NEXT: linalg.copy(%[[SM1]], %[[SUBVIEW1]]) : memref<2x?xf32>, memref<2x?xf32, #[[$MAP1]]>
-  // CHECK-NEXT: %[[RT1:.*]] = memref.tensor_load %[[M1_COPY]] : memref<?x?xf32>
+  // CHECK-NEXT: %[[RT1:.*]] = memref.tensor_load %[[M_COPY1]] : memref<?x?xf32>
   %t1 = subtensor_insert %st1 into %t[0, %i0][2, %i0][1, 2] : tensor<2x?xf32> into tensor<?x?xf32>
 
   // CHECK: return %[[RT0]], %[[RT1]]
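
Illustration (not part of the patch): marking memref.buffer_cast as NoSideEffect lets CSE deduplicate identical casts of the same tensor, which is why the updated test checks for a single %[[M]] where it previously expected separate %[[M0]] and %[[M1]] values. A minimal sketch of the effect follows; the function name and shapes are hypothetical, not taken from the patch.

// Input to `mlir-opt -cse`: two identical casts of the same tensor.
func @cse_example(%t: tensor<?x?xf32>) -> (memref<?x?xf32>, memref<?x?xf32>) {
  %m0 = memref.buffer_cast %t : memref<?x?xf32>
  %m1 = memref.buffer_cast %t : memref<?x?xf32>
  return %m0, %m1 : memref<?x?xf32>, memref<?x?xf32>
}

// After -cse: with the op now known to be side-effect free, the second
// cast is erased and both uses refer to the single remaining value.
func @cse_example(%t: tensor<?x?xf32>) -> (memref<?x?xf32>, memref<?x?xf32>) {
  %m = memref.buffer_cast %t : memref<?x?xf32>
  return %m, %m : memref<?x?xf32>, memref<?x?xf32>
}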