diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -539,20 +539,6 @@ } namespace { -/// Canonicalize bufferization.to_tensor + bufferization.to_memref. -struct ToTensorToMemrefFolding : public OpRewritePattern<ToTensorOp> { - using OpRewritePattern<ToTensorOp>::OpRewritePattern; - - LogicalResult matchAndRewrite(ToTensorOp toTensorOp, - PatternRewriter &rewriter) const final { - auto toMemrefOp = toTensorOp.getMemref().getDefiningOp<ToMemrefOp>(); - if (!toMemrefOp) - return failure(); - rewriter.replaceOp(toTensorOp, toMemrefOp.getTensor()); - return success(); - } -}; - struct DimOfToTensorFolder : public OpRewritePattern<tensor::DimOp> { using OpRewritePattern<tensor::DimOp>::OpRewritePattern; @@ -571,7 +557,7 @@ void ToTensorOp::getCanonicalizationPatterns(RewritePatternSet &results, MLIRContext *context) { - results.add<DimOfToTensorFolder, ToTensorToMemrefFolding>(context); + results.add<DimOfToTensorFolder>(context); } //===----------------------------------------------------------------------===// diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir --- a/mlir/test/Dialect/SCF/canonicalize.mlir +++ b/mlir/test/Dialect/SCF/canonicalize.mlir @@ -787,7 +787,8 @@ } // CHECK-NEXT: %[[R0:.*]] = bufferization.to_tensor %[[M0]] : memref<128x128xf32> - // CHECK-NEXT: return %[[R0]], %[[T1]], %[[FOR_RES]] : tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32> + // CHECK-NEXT: %[[R1:.*]] = bufferization.to_tensor %[[M1]] : memref<128x128xf32> + // CHECK-NEXT: return %[[R0]], %[[R1]], %[[FOR_RES]] : tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32> return %0#0, %0#1, %0#2 : tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32> } diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir +++ 
b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir @@ -109,7 +109,8 @@ // CHECK: scf.yield %[[VAL_84]] : f64 // CHECK: } // CHECK: memref.store %[[VAL_86:.*]], %[[VAL_15]][] : memref<f64> -// CHECK: return %[[VAL_0]] : tensor<f64> +// CHECK: %[[VAL_87:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<f64> +// CHECK: return %[[VAL_87]] : tensor<f64> // CHECK: } func.func @sparse_matrix_sum(%argx: tensor<f64> {linalg.inplaceable = true}, %arga: tensor<64x32xf64, #SparseMatrix>,