diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -1707,32 +1707,42 @@
 /// For ViewLikeOpInterface.
 Value SubViewOp::getViewSource() { return source(); }
 
+/// Return true if t1 and t2 have equal offsets (both dynamic or of same static
+/// value).
+static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) {
+  AffineExpr t1Offset, t2Offset;
+  SmallVector<AffineExpr> t1Strides, t2Strides;
+  auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset);
+  auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset);
+  return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset;
+}
+
 /// Checks if `original` Type type can be rank reduced to `reduced` type.
 /// This function is slight variant of `is subsequence` algorithm where
 /// not matching dimension must be 1.
 static SliceVerificationResult
 isRankReducedMemRefType(MemRefType originalType,
-                        MemRefType candidatecandidateReducedType,
+                        MemRefType candidateRankReducedType,
                         ArrayRef<OpFoldResult> sizes) {
-  auto partialRes =
-      isRankReducedType(originalType, candidatecandidateReducedType);
+  auto partialRes = isRankReducedType(originalType, candidateRankReducedType);
   if (partialRes != SliceVerificationResult::Success)
     return partialRes;
 
-  MemRefType original = originalType.cast<MemRefType>();
-  MemRefType candidateReduced =
-      candidatecandidateReducedType.cast<MemRefType>();
-
-  auto optionalUnusedDimsMask =
-      computeMemRefRankReductionMask(original, candidateReduced, sizes);
+  auto optionalUnusedDimsMask = computeMemRefRankReductionMask(
+      originalType, candidateRankReducedType, sizes);
 
   // Sizes cannot be matched in case empty vector is returned.
   if (!optionalUnusedDimsMask.hasValue())
     return SliceVerificationResult::LayoutMismatch;
 
-  if (original.getMemorySpace() != candidateReduced.getMemorySpace())
+  if (originalType.getMemorySpace() !=
+      candidateRankReducedType.getMemorySpace())
     return SliceVerificationResult::MemSpaceMismatch;
 
+  // No amount of stride dropping can reconcile incompatible offsets.
+  if (!haveCompatibleOffsets(originalType, candidateRankReducedType))
+    return SliceVerificationResult::LayoutMismatch;
+
   return SliceVerificationResult::Success;
 }
diff --git a/mlir/test/Dialect/MemRef/invalid.mlir b/mlir/test/Dialect/MemRef/invalid.mlir
--- a/mlir/test/Dialect/MemRef/invalid.mlir
+++ b/mlir/test/Dialect/MemRef/invalid.mlir
@@ -660,6 +660,42 @@
 
 // -----
 
+#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1)>
+
+func @subview_bad_offset_1(%arg0: memref<16x16xf32>) {
+  %c0 = arith.constant 0 : index
+  %c8 = arith.constant 8 : index
+  // expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
+  %s2 = memref.subview %arg0[%c8, %c8][8, 8][1, 1] : memref<16x16xf32> to memref<8x8xf32, #map0>
+  return
+}
+
+// -----
+
+#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1 + 136)>
+
+func @subview_bad_offset_2(%arg0: memref<16x16xf32>) {
+  %c0 = arith.constant 0 : index
+  %c8 = arith.constant 8 : index
+  // expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
+  %s2 = memref.subview %arg0[%c8, 8][8, 8][1, 1] : memref<16x16xf32> to memref<8x8xf32, #map0>
+  return
+}
+
+// -----
+
+#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1 + s0 * 437)>
+
+func @subview_bad_offset_3(%arg0: memref<16x16xf32>) {
+  %c0 = arith.constant 0 : index
+  %c8 = arith.constant 8 : index
+  // expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
+  %s2 = memref.subview %arg0[%c8, 8][8, 8][1, 1] : memref<16x16xf32> to memref<8x8xf32, #map0>
+  return
+}
+
+// -----
+
 func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
   // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>>' are cast incompatible}}
   %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:0, strides:[128, 32, 2]>
diff --git a/mlir/test/Dialect/MemRef/subview.mlir b/mlir/test/Dialect/MemRef/subview.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/MemRef/subview.mlir
@@ -0,0 +1,138 @@
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s
+// RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
+
+// CHECK-DAG: #[[$BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+// CHECK-DAG: #[[$BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
+
+// CHECK-DAG: #[[$BASE_MAP1:map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+
+// CHECK-DAG: #[[$BASE_MAP2:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 22 + d1)>
+// CHECK-DAG: #[[$SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// CHECK-DAG: #[[$SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>
+// CHECK-DAG: #[[$SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
+// CHECK-DAG: #[[$SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0 * 36 + d1 * 36 + d2 * 4 + d3 * 4 + d4)>
+// CHECK-DAG: #[[$SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5, s6] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5 * s6)>
+// CHECK-DAG: #[[$SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4)>
+// CHECK-DAG: #[[$SUBVIEW_MAP9:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 3 + d1 + 6)>
+// CHECK-DAG: #[[$SUBVIEW_MAP10:map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// CHECK-DAG: #[[$SUBVIEW_MAP11:map[0-9]+]] = affine_map<() -> (4)>
+// CHECK-DAG: #[[$SUBVIEW_MAP12:map[0-9]+]] = affine_map<()[s0] -> (s0)>
+
+// CHECK-LABEL: func @memref_subview(%arg0
+func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+
+  %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
+  // CHECK: subview %0[%c0, %c0, %c0] [%arg0, %arg1, %arg2] [%c1, %c1, %c1] :
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<?x?x?xf32, #[[$BASE_MAP3]]>
+  %1 = memref.subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1]
+    : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
+      memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
+
+  %2 = memref.alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
+  // CHECK: memref.subview %2[%c1] [%arg0] [%c1] :
+  // CHECK-SAME: memref<64xf32, #[[$BASE_MAP1]]>
+  // CHECK-SAME: to memref<?xf32, #[[$SUBVIEW_MAP1]]>
+  %3 = memref.subview %2[%c1][%arg0][%c1]
+    : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to
+      memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
+
+  %4 = memref.alloc() : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>>
+  // CHECK: memref.subview %4[%c0, %c1] [%arg0, %arg1] [%c1, %c0] :
+  // CHECK-SAME: memref<64x22xf32, #[[$BASE_MAP2]]>
+  // CHECK-SAME: to memref<?x?xf32, #[[$SUBVIEW_MAP2]]>
+  %5 = memref.subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0]
+    : memref<64x22xf32, offset:0, strides: [22, 1]> to
+      memref<?x?xf32, offset: ?, strides: [?, ?]>
+
+  // CHECK: memref.subview %0[0, 2, 0] [4, 4, 4] [1, 1, 1] :
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<4x4x4xf32, #[[$SUBVIEW_MAP3]]>
+  %6 = memref.subview %0[0, 2, 0][4, 4, 4][1, 1, 1]
+    : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
+      memref<4x4x4xf32, offset:8, strides: [64, 4, 1]>
+
+  %7 = memref.alloc(%arg1, %arg2) : memref<?x?xf32>
+  // CHECK: memref.subview {{%.*}}[0, 0] [4, 4] [1, 1] :
+  // CHECK-SAME: memref<?x?xf32>
+  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP4]]>
+  %8 = memref.subview %7[0, 0][4, 4][1, 1]
+    : memref<?x?xf32> to memref<4x4xf32, offset: ?, strides:[?, 1]>
+
+  %9 = memref.alloc() : memref<16x4xf32>
+  // CHECK: memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [{{%.*}}, {{%.*}}] :
+  // CHECK-SAME: memref<16x4xf32>
+  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP2]]
+  %10 = memref.subview %9[%arg1, %arg1][4, 4][%arg2, %arg2]
+    : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[?, ?]>
+
+  // CHECK: memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [2, 2] :
+  // CHECK-SAME: memref<16x4xf32>
+  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP5]]
+  %11 = memref.subview %9[%arg1, %arg2][4, 4][2, 2]
+    : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[8, 2]>
+
+  %12 = memref.alloc() : memref<1x9x1x4x1xf32, affine_map<(d0, d1, d2, d3, d4) -> (36 * d0 + 36 * d1 + 4 * d2 + 4 * d3 + d4)>>
+  // CHECK: memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
+  // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
+  // CHECK-SAME: memref<1x9x1x4x1xf32, #[[$SUBVIEW_MAP6]]> to memref<9x4xf32, #[[$SUBVIEW_MAP2]]>
+  %13 = memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<9x4xf32, offset: ?, strides: [?, ?]>
+  // CHECK: memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
+  // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
+  // CHECK-SAME: memref<1x9x1x4x1xf32, #[[$SUBVIEW_MAP6]]> to memref<1x9x4xf32, #[[$BASE_MAP3]]>
+  %14 = memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<1x9x4xf32, offset: ?, strides: [?, ?, ?]>
+
+  %15 = memref.alloc(%arg1, %arg2)[%c0, %c1, %arg1, %arg0, %arg0, %arg2, %arg2] : memref<1x?x5x1x?x1xf32, affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5, s6] -> (s0 + s1 * d0 + s2 * d1 + s3 * d2 + s4 * d3 + s5 * d4 + s6 * d5)>>
+  // CHECK: memref.subview %15[0, 0, 0, 0, 0, 0] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
+  // CHECK-SAME: memref<1x?x5x1x?x1xf32, #[[$SUBVIEW_MAP7]]> to memref<?x5x?xf32, #[[$BASE_MAP3]]>
+  %16 = memref.subview %15[0, 0, 0, 0, 0, 0][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?xf32, offset: ?, strides: [?, ?, ?]>
+  // CHECK: memref.subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
+  // CHECK-SAME: memref<1x?x5x1x?x1xf32, #[[$SUBVIEW_MAP7]]> to memref<?x5x?x1xf32, #[[$SUBVIEW_MAP8]]>
+  %17 = memref.subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?x1xf32, offset: ?, strides: [?, ?, ?, ?]>
+
+  %18 = memref.alloc() : memref<1x8xf32>
+  // CHECK: memref.subview %18[0, 0] [1, 8] [1, 1] : memref<1x8xf32> to memref<8xf32>
+  %19 = memref.subview %18[0, 0][1, 8][1, 1] : memref<1x8xf32> to memref<8xf32>
+
+  %20 = memref.alloc() : memref<8x16x4xf32>
+  // CHECK: memref.subview %20[0, 0, 0] [1, 16, 4] [1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
+  %21 = memref.subview %20[0, 0, 0][1, 16, 4][1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
+
+  %22 = memref.subview %20[3, 4, 2][1, 6, 3][1, 1, 1] : memref<8x16x4xf32> to memref<6x3xf32, offset: 210, strides: [4, 1]>
+
+  %23 = memref.alloc() : memref<f32>
+  %78 = memref.subview %23[] [] [] : memref<f32> to memref<f32>
+
+  /// Subview with only leading operands.
+  %24 = memref.alloc() : memref<5x3xf32>
+  // CHECK: memref.subview %{{.*}}[2] [3] [1] : memref<5x3xf32> to memref<3x3xf32, #[[$SUBVIEW_MAP9]]>
+  %25 = memref.subview %24[2][3][1]: memref<5x3xf32> to memref<3x3xf32, offset: 6, strides: [3, 1]>
+
+  /// Rank-reducing subview with only leading operands.
+  // CHECK: memref.subview %{{.*}}[1] [1] [1] : memref<5x3xf32> to memref<3xf32, #[[$SUBVIEW_MAP10]]>
+  %26 = memref.subview %24[1][1][1]: memref<5x3xf32> to memref<3xf32, offset: 3, strides: [1]>
+
+  // Corner-case of 0-D rank-reducing subview with an offset.
+  // CHECK: memref.subview %{{.*}}[1, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, #[[$SUBVIEW_MAP11]]>
+  %27 = memref.subview %24[1, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, affine_map<() -> (4)>>
+
+  // CHECK: memref.subview %{{.*}}[%{{.*}}, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, #[[$SUBVIEW_MAP12]]>
+  %28 = memref.subview %24[%arg0, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, affine_map<()[s0] -> (s0)>>
+
+  // CHECK: memref.subview %{{.*}}[0, %{{.*}}] [%{{.*}}, 1] [1, 1] : memref<?x?xf32> to memref<?xf32, #[[$SUBVIEW_MAP1]]>
+  %a30 = memref.alloc(%arg0, %arg0) : memref<?x?xf32>
+  %30 = memref.subview %a30[0, %arg1][%arg2, 1][1, 1] : memref<?x?xf32> to memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
+
+  %c8 = arith.constant 8 : index
+  %a40 = memref.alloc() : memref<16x16xf32>
+  // CHECK: memref.subview
+  %40 = memref.subview %a40[%c8, 8][8, 8][1, 1] :
+    memref<16x16xf32> to memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + d1 + s0)>>
+
+  return
+}
+
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -11,22 +11,6 @@
 // CHECK-DAG: #[[$BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
 // CHECK-DAG: #[[$BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
 
-// CHECK-DAG: #[[$BASE_MAP1:map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[$SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-
-// CHECK-DAG: #[[$BASE_MAP2:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 22 + d1)>
-// CHECK-DAG: #[[$SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-// CHECK-DAG: #[[$SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>
-// CHECK-DAG: #[[$SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[$SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
-// CHECK-DAG: #[[$SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0 * 36 + d1 * 36 + d2 * 4 + d3 * 4 + d4)>
-// CHECK-DAG: #[[$SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5, s6] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5 * s6)>
-// CHECK-DAG: #[[$SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4)>
-// CHECK-DAG: #[[$SUBVIEW_MAP9:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 3 + d1 + 6)>
-// CHECK-DAG: #[[$SUBVIEW_MAP10:map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
-// CHECK-DAG: #[[$SUBVIEW_MAP11:map[0-9]+]] = affine_map<() -> (4)>
-// CHECK-DAG: #[[$SUBVIEW_MAP12:map[0-9]+]] = affine_map<()[s0] -> (s0)>
-
 // CHECK-LABEL: func @func_with_ops
 // CHECK-SAME: %[[ARG:.*]]: f32
 func @func_with_ops(f32) {
@@ -305,116 +289,6 @@
   return
 }
 
-// CHECK-LABEL: func @memref_subview(%arg0
-func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %c0 = arith.constant 0 : index
-  %c1 = arith.constant 1 : index
-
-  %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
-  // CHECK: subview %0[%c0, %c0, %c0] [%arg0, %arg1, %arg2] [%c1, %c1, %c1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
-  // CHECK-SAME: to memref<?x?x?xf32, #[[$BASE_MAP3]]>
-  %1 = memref.subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1]
-    : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
-      memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
-
-  %2 = memref.alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
-  // CHECK: memref.subview %2[%c1] [%arg0] [%c1] :
-  // CHECK-SAME: memref<64xf32, #[[$BASE_MAP1]]>
-  // CHECK-SAME: to memref<?xf32, #[[$SUBVIEW_MAP1]]>
-  %3 = memref.subview %2[%c1][%arg0][%c1]
-    : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to
-      memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
-
-  %4 = memref.alloc() : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>>
-  // CHECK: memref.subview %4[%c0, %c1] [%arg0, %arg1] [%c1, %c0] :
-  // CHECK-SAME: memref<64x22xf32, #[[$BASE_MAP2]]>
-  // CHECK-SAME: to memref<?x?xf32, #[[$SUBVIEW_MAP2]]>
-  %5 = memref.subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0]
-    : memref<64x22xf32, offset:0, strides: [22, 1]> to
-      memref<?x?xf32, offset: ?, strides: [?, ?]>
-
-  // CHECK: memref.subview %0[0, 2, 0] [4, 4, 4] [1, 1, 1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
-  // CHECK-SAME: to memref<4x4x4xf32, #[[$SUBVIEW_MAP3]]>
-  %6 = memref.subview %0[0, 2, 0][4, 4, 4][1, 1, 1]
-    : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
-      memref<4x4x4xf32, offset:8, strides: [64, 4, 1]>
-
-  %7 = memref.alloc(%arg1, %arg2) : memref<?x?xf32>
-  // CHECK: memref.subview {{%.*}}[0, 0] [4, 4] [1, 1] :
-  // CHECK-SAME: memref<?x?xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP4]]>
-  %8 = memref.subview %7[0, 0][4, 4][1, 1]
-    : memref<?x?xf32> to memref<4x4xf32, offset: ?, strides:[?, 1]>
-
-  %9 = memref.alloc() : memref<16x4xf32>
-  // CHECK: memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [{{%.*}}, {{%.*}}] :
-  // CHECK-SAME: memref<16x4xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP2]]
-  %10 = memref.subview %9[%arg1, %arg1][4, 4][%arg2, %arg2]
-    : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[?, ?]>
-
-  // CHECK: memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [2, 2] :
-  // CHECK-SAME: memref<16x4xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP5]]
-  %11 = memref.subview %9[%arg1, %arg2][4, 4][2, 2]
-    : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[8, 2]>
-
-  %12 = memref.alloc() : memref<1x9x1x4x1xf32, affine_map<(d0, d1, d2, d3, d4) -> (36 * d0 + 36 * d1 + 4 * d2 + 4 * d3 + d4)>>
-  // CHECK: memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
-  // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
-  // CHECK-SAME: memref<1x9x1x4x1xf32, #[[$SUBVIEW_MAP6]]> to memref<9x4xf32, #[[$SUBVIEW_MAP2]]>
-  %13 = memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<9x4xf32, offset: ?, strides: [?, ?]>
-  // CHECK: memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
-  // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
-  // CHECK-SAME: memref<1x9x1x4x1xf32, #[[$SUBVIEW_MAP6]]> to memref<1x9x4xf32, #[[$BASE_MAP3]]>
-  %14 = memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<1x9x4xf32, offset: ?, strides: [?, ?, ?]>
-
-  %15 = memref.alloc(%arg1, %arg2)[%c0, %c1, %arg1, %arg0, %arg0, %arg2, %arg2] : memref<1x?x5x1x?x1xf32, affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5, s6] -> (s0 + s1 * d0 + s2 * d1 + s3 * d2 + s4 * d3 + s5 * d4 + s6 * d5)>>
-  // CHECK: memref.subview %15[0, 0, 0, 0, 0, 0] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
-  // CHECK-SAME: memref<1x?x5x1x?x1xf32, #[[$SUBVIEW_MAP7]]> to memref<?x5x?xf32, #[[$BASE_MAP3]]>
-  %16 = memref.subview %15[0, 0, 0, 0, 0, 0][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?xf32, offset: ?, strides: [?, ?, ?]>
-  // CHECK: memref.subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
-  // CHECK-SAME: memref<1x?x5x1x?x1xf32, #[[$SUBVIEW_MAP7]]> to memref<?x5x?x1xf32, #[[$SUBVIEW_MAP8]]>
-  %17 = memref.subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?x1xf32, offset: ?, strides: [?, ?, ?, ?]>
-
-  %18 = memref.alloc() : memref<1x8xf32>
-  // CHECK: memref.subview %18[0, 0] [1, 8] [1, 1] : memref<1x8xf32> to memref<8xf32>
-  %19 = memref.subview %18[0, 0][1, 8][1, 1] : memref<1x8xf32> to memref<8xf32>
-
-  %20 = memref.alloc() : memref<8x16x4xf32>
-  // CHECK: memref.subview %20[0, 0, 0] [1, 16, 4] [1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
-  %21 = memref.subview %20[0, 0, 0][1, 16, 4][1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
-
-  %22 = memref.subview %20[3, 4, 2][1, 6, 3][1, 1, 1] : memref<8x16x4xf32> to memref<6x3xf32, offset: 210, strides: [4, 1]>
-
-  %23 = memref.alloc() : memref<f32>
-  %78 = memref.subview %23[] [] [] : memref<f32> to memref<f32>
-
-  /// Subview with only leading operands.
-  %24 = memref.alloc() : memref<5x3xf32>
-  // CHECK: memref.subview %{{.*}}[2] [3] [1] : memref<5x3xf32> to memref<3x3xf32, #[[$SUBVIEW_MAP9]]>
-  %25 = memref.subview %24[2][3][1]: memref<5x3xf32> to memref<3x3xf32, offset: 6, strides: [3, 1]>
-
-  /// Rank-reducing subview with only leading operands.
-  // CHECK: memref.subview %{{.*}}[1] [1] [1] : memref<5x3xf32> to memref<3xf32, #[[$SUBVIEW_MAP10]]>
-  %26 = memref.subview %24[1][1][1]: memref<5x3xf32> to memref<3xf32, offset: 3, strides: [1]>
-
-  // Corner-case of 0-D rank-reducing subview with an offset.
-  // CHECK: memref.subview %{{.*}}[1, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, #[[$SUBVIEW_MAP11]]>
-  %27 = memref.subview %24[1, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, affine_map<() -> (4)>>
-
-  // CHECK: memref.subview %{{.*}}[%{{.*}}, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, #[[$SUBVIEW_MAP12]]>
-  %28 = memref.subview %24[%arg0, 1] [1, 1] [1, 1] : memref<5x3xf32> to memref<f32, affine_map<()[s0] -> (s0)>>
-
-  // CHECK: memref.subview %{{.*}}[0, %{{.*}}] [%{{.*}}, 1] [1, 1] : memref<?x?xf32> to memref<?xf32, #[[$SUBVIEW_MAP1]]>
-  %a30 = memref.alloc(%arg0, %arg0) : memref<?x?xf32>
-  %30 = memref.subview %a30[0, %arg1][%arg2, 1][1, 1] : memref<?x?xf32> to memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
-
-  return
-}
-
 // CHECK-LABEL: func @test_dimop
 // CHECK-SAME: %[[ARG:.*]]: tensor<4x4x?xf32>
 func @test_dimop(%arg0: tensor<4x4x?xf32>) {
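Illustration of the new offset check (a minimal sketch, not part of the patch): in the three new invalid tests the subview offset along a 16x16 row-major source is %c8 * 16 + %c8 or %c8 * 16 + 8, which is not a compile-time constant, so the inferred result layout carries a dynamic offset symbol s0 (as quoted in the expected-error messages); a result map with a static offset of 0 or 136, or with the symbol scaled as s0 * 437, can never be compatible. The example below mirrors the accepted form exercised by %40 in the new subview.mlir test; the function name @subview_offset_ok and the map name #map_ok are made up for this sketch.

```mlir
// Assumed standalone example: the source has strides [16, 1] and offset 0;
// because the row offset %c8 is an SSA value, the subview's offset is dynamic
// and the result layout must spell it as the symbol s0.
#map_ok = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1 + s0)>

func @subview_offset_ok(%arg0: memref<16x16xf32>) {
  %c8 = arith.constant 8 : index
  %0 = memref.subview %arg0[%c8, 8][8, 8][1, 1]
      : memref<16x16xf32> to memref<8x8xf32, #map_ok>
  return
}
```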