diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -302,7 +302,7 @@
       // The existing uses of the OpResult still expect a tensor. Insert a
       // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
       // lose all of its users and eventually DCE away.
-      setInsertionPointAfter(rewriter, replacement);
+      rewriter.setInsertionPointAfter(op);
       replacement = rewriter.create<bufferization::ToTensorOp>(
           replacement.getLoc(), replacement);
     }
diff --git a/mlir/test/Dialect/Linalg/comprehensive-function-bufferize.mlir b/mlir/test/Dialect/Linalg/comprehensive-function-bufferize.mlir
--- a/mlir/test/Dialect/Linalg/comprehensive-function-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-function-bufferize.mlir
@@ -30,9 +30,9 @@
   // CHECK: %[[dim:.*]] = tensor.dim %[[A]]
   // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
   // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
-  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[casted]]
   // CHECK: memref.copy %[[A_memref]], %[[alloc]]
   // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
+  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[casted]]
   %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>

   // CHECK: return %[[res_tensor]]
diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir
@@ -34,16 +34,16 @@
   // CHECK-NEXT: %[[A:.*]] = memref.alloca() {alignment = 128 : i64} : memref<64xf32>
   // CHECK-NEXT: %[[B:.*]] = memref.alloca() {alignment = 128 : i64} : memref<64xf32>
   // CHECK-NEXT: %[[C:.*]] = memref.alloca() {alignment = 128 : i64} : memref<f32>
+  // CHECK-NEXT: %[[cA:.*]] = memref.cast %[[A]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
+  // CHECK-NEXT: %[[cB:.*]] = memref.cast %[[B]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
+  // CHECK-NEXT: %[[cC:.*]] = memref.cast %[[C]] : memref<f32> to memref<f32, #[[$DYN_0D_MAP]]>
   %A = linalg.init_tensor [64] : tensor<64xf32>
   %B = linalg.init_tensor [64] : tensor<64xf32>
   %C = linalg.init_tensor [] : tensor<f32>

   // CHECK-NEXT: linalg.fill(%[[C1]], %[[A]]) : f32, memref<64xf32>
-  // CHECK-NEXT: %[[cA:.*]] = memref.cast %[[A]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   // CHECK-NEXT: linalg.fill(%[[C2]], %[[B]]) : f32, memref<64xf32>
-  // CHECK-NEXT: %[[cB:.*]] = memref.cast %[[B]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   // CHECK-NEXT: linalg.fill(%[[C0]], %[[C]]) : f32, memref<f32>
-  // CHECK-NEXT: %[[cC:.*]] = memref.cast %[[C]] : memref<f32> to memref<f32, #[[$DYN_0D_MAP]]>
   %AA = linalg.fill(%v1, %A) : f32, tensor<64xf32> -> tensor<64xf32>
   %BB = linalg.fill(%v2, %B) : f32, tensor<64xf32> -> tensor<64xf32>
   %CC = linalg.fill(%v0, %C) : f32, tensor<f32> -> tensor<f32>
diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
@@ -597,16 +597,16 @@
   // CHECK-NEXT: %[[A:.*]] = memref.alloc() {alignment = 128 : i64} : memref<64xf32>
   // CHECK-NEXT: %[[B:.*]] = memref.alloc() {alignment = 128 : i64} : memref<64xf32>
   // CHECK-NEXT: %[[C:.*]] = memref.alloc() {alignment = 128 : i64} : memref<f32>
+  // CHECK-NEXT: %[[cA:.*]] = memref.cast %[[A]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
+  // CHECK-NEXT: %[[cB:.*]] = memref.cast %[[B]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
+  // CHECK-NEXT: %[[cC:.*]] = memref.cast %[[C]] : memref<f32> to memref<f32, #[[$DYN_0D_MAP]]>
   %A = linalg.init_tensor [64] : tensor<64xf32>
   %B = linalg.init_tensor [64] : tensor<64xf32>
   %C = linalg.init_tensor [] : tensor<f32>

   // CHECK-NEXT: linalg.fill(%[[C1]], %[[A]]) : f32, memref<64xf32>
-  // CHECK-NEXT: %[[cA:.*]] = memref.cast %[[A]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   // CHECK-NEXT: linalg.fill(%[[C2]], %[[B]]) : f32, memref<64xf32>
-  // CHECK-NEXT: %[[cB:.*]] = memref.cast %[[B]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   // CHECK-NEXT: linalg.fill(%[[C0]], %[[C]]) : f32, memref<f32>
-  // CHECK-NEXT: %[[cC:.*]] = memref.cast %[[C]] : memref<f32> to memref<f32, #[[$DYN_0D_MAP]]>
   %AA = linalg.fill(%v1, %A) : f32, tensor<64xf32> -> tensor<64xf32>
   %BB = linalg.fill(%v2, %B) : f32, tensor<64xf32> -> tensor<64xf32>
   %CC = linalg.fill(%v0, %C) : f32, tensor<f32> -> tensor<f32>
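All of the test churn above follows from the insertion point change: the bufferization.to_tensor op used to be created right after the memref.cast that produced the replacement value, and is now created after the bufferized op itself, so it trails every op that writes into the new buffer. A minimal sketch of the resulting IR for the vector.transfer_write test, reconstructed from its CHECK lines (#map is a placeholder for the dynamic layout map, which the test does not pin down):

  %alloc = memref.alloc(%dim) : memref<?xf32>
  %casted = memref.cast %alloc : memref<?xf32> to memref<?xf32, #map>
  memref.copy %A_memref, %alloc : memref<?xf32, #map> to memref<?xf32>
  vector.transfer_write %v, %alloc[%c0] : vector<4xf32>, memref<?xf32>
  // The to_tensor op now comes last, after all writes to %alloc.
  %res_tensor = bufferization.to_tensor %casted : memref<?xf32, #map>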