diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -1917,12 +1917,6 @@
     // Both source and result stride must have the same static value. In that
     // case, we can be sure, that the dimensions are collapsible (because they
     // are contiguous).
-    //
-    // One special case is when the srcShape is `1`, in which case it can
-    // never produce non-contiguity.
-    if (srcShape[idx] == 1)
-      continue;
-
     // If `strict = false` (default during op verification), we accept cases
     // where one or both strides are dynamic. This is best effort: We reject
     // ops where obviously non-contiguous dims are collapsed, but accept ops
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -495,6 +495,23 @@
 
 // -----
 
+// CHECK-LABEL: func @tensor.collapse_shape_of_slice5(
+func.func @tensor.collapse_shape_of_slice5(%arg0: tensor<2x2x2xi64>) -> tensor<4xi64> {
+  // CHECK: %[[subview:.*]] = memref.subview %{{.*}} : memref<2x2x2xi64> to memref<2x1x2xi64, #{{.*}}>
+  %0 = tensor.extract_slice %arg0[0, 0, 0] [2, 1, 2] [1, 1, 1] : tensor<2x2x2xi64> to tensor<2x1x2xi64>
+
+  // This memref is not collapsible, so the buffer must be copied to get rid of
+  // the layout map.
+  // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<2x1x2xi64>
+  // CHECK: memref.copy %[[subview]], %[[alloc]]
+  // CHECK: memref.collapse_shape %[[alloc]] [
+  // CHECK-SAME: [0, 1, 2]] : memref<2x1x2xi64> into memref<4xi64>
+  %1 = tensor.collapse_shape %0 [[0, 1, 2]] : tensor<2x1x2xi64> into tensor<4xi64>
+  return %1 : tensor<4xi64>
+}
+
+// -----
+
 // CHECK-LABEL: func @tensor.reshape(
 // CHECK-SAME: %[[t1:.*]]: tensor<?x10xf32>
 func.func @tensor.reshape(%t1: tensor<?x10xf32>) -> tensor<2x2x5xf32> {
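
Note (not part of the patch): a minimal standalone sketch of why the removed unit-dim special case was unsound. Skipping a unit dim is fine only if the stride comparison still "sees through" it to the next non-unit dim; `continue`-ing past the comparison entirely can hide a gap between the unit dim's neighbors, which is exactly what the new test exercises. The function name below is hypothetical and the logic is a simplified stand-in for the collapsibility check in `MemRefOps.cpp`, not the patched code itself.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical sketch: is a strided buffer with the given sizes/strides
// row-major contiguous, i.e. collapsible into one dim without a copy?
static bool isCollapsibleToOneDim(const std::vector<int64_t> &sizes,
                                  const std::vector<int64_t> &strides) {
  assert(sizes.size() == strides.size());
  int64_t expectedStride = 1; // stride a packed buffer would have here
  for (size_t i = sizes.size(); i-- > 0;) {
    if (sizes[i] == 1)
      continue; // a unit dim's own stride never matters for contiguity
    if (strides[i] != expectedStride)
      return false; // gap between this dim and the dims nested inside it
    expectedStride *= sizes[i];
  }
  return true;
}

int main() {
  // The subview from the new test: sizes [2, 1, 2], strides [4, 2, 1].
  // Dim 0 would need stride 2 (= 2 * 1) for contiguity, but it is 4, so
  // the view covers offsets {0, 1, 4, 5} and bufferization must copy.
  assert(!isCollapsibleToOneDim({2, 1, 2}, {4, 2, 1}));
  // A fully packed [2, 2, 2] buffer with strides [4, 2, 1] collapses fine.
  assert(isCollapsibleToOneDim({2, 2, 2}, {4, 2, 1}));
}
```

The key design point is that the unit dim is ignored, but the comparison between its neighbors still happens (via `expectedStride`); the removed `if (srcShape[idx] == 1) continue;` skipped that comparison as well, wrongly accepting the [2, 1, 2] / [4, 2, 1] case above.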