diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp --- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp +++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp @@ -67,8 +67,7 @@ // op won't be top-level anymore after inlining. Attribute operandCst; return matchPattern(value.getDefiningOp(), m_Constant(&operandCst)) || - value.getDefiningOp<memref::DimOp>() || - value.getDefiningOp<tensor::DimOp>(); + value.getDefiningOp<memref::DimOp>(); } /// Checks if all values known to be legal affine dimensions or symbols in `src` @@ -304,8 +303,6 @@ // level. if (auto dimOp = dyn_cast<memref::DimOp>(op)) return isTopLevelValue(dimOp.source()); - if (auto dimOp = dyn_cast<tensor::DimOp>(op)) - return isTopLevelValue(dimOp.source()); return false; } @@ -418,8 +415,6 @@ // Dim op results could be valid symbols at any level. if (auto dimOp = dyn_cast<memref::DimOp>(defOp)) return isDimOpValidSymbol(dimOp, region); - if (auto dimOp = dyn_cast<tensor::DimOp>(defOp)) - return isDimOpValidSymbol(dimOp, region); // Check for values dominating `region`'s parent op. Operation *regionOp = region ? 
region->getParentOp() : nullptr; diff --git a/mlir/test/Dialect/Linalg/fusion-sequence.mlir b/mlir/test/Dialect/Linalg/fusion-sequence.mlir --- a/mlir/test/Dialect/Linalg/fusion-sequence.mlir +++ b/mlir/test/Dialect/Linalg/fusion-sequence.mlir @@ -213,7 +213,7 @@ } // CHECK: #[[MAP0:.+]] = affine_map<(d0)[s0] -> (16, -d0 + s0)> -// CHECK: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s0, 16, -d0 + s1)> +// CHECK: #[[MAP1:.+]] = affine_map<(d0, d1)[s0] -> (-d0 + d1, 16, -d0 + s0)> // CHECK: func @tensor_matmul_fusion( // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor @@ -232,7 +232,7 @@ // CHECK: %[[N3:.+]] = tensor.dim %[[ARG8]], %[[C1]] // CHECK: %[[STARG6:.+]] = tensor.extract_slice %[[ARG8]][%[[IV0]], 0] // CHECK-SAME: [%[[TILE_M_1]], %[[N3]]] -// CHECK: %[[TILE_M_2:.+]] = affine.min #[[MAP1]](%[[IV0]])[%[[M]], %[[M]]] +// CHECK: %[[TILE_M_2:.+]] = affine.min #[[MAP1]](%[[IV0]], %[[M]])[%[[M]]] // CHECK: %[[N2:.+]] = tensor.dim %[[ARG4]], %[[C1]] // CHECK: %[[STARG4:.+]] = tensor.extract_slice %[[ARG4]][%[[IV0]], 0] // CHECK-SAME: [%[[TILE_M_2]], %[[N2]]] diff --git a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir --- a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir +++ b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir @@ -14,9 +14,9 @@ } } // CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (32, -d0 + s0)> -// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0)[s0] -> (16, -d0 + s0)> -// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0)[s0] -> (64, -d0 + s0)> -// CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s0, 32, -d0 + s1)> +// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (16, -d0 + d1)> +// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1) -> (64, -d0 + d1)> +// CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1)[s0] -> (-d0 + d1, 32, -d0 + s0)> // CHECK: func @matmul_fusion // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor @@ -38,7 +38,7 @@ // CHECK: %[[N3:.+]] = tensor.dim %[[ARG6]], %[[C1]] // CHECK: 
%[[ST_ARG6:.+]] = tensor.extract_slice %[[ARG6]][%[[IV0]], 0] // CHECK-SAME: [%[[TILE_M_2]], %[[N3]]] -// CHECK: %[[TILE_M_3:.+]] = affine.min #[[MAP5]](%[[IV0]])[%[[M]], %[[M]]] +// CHECK: %[[TILE_M_3:.+]] = affine.min #[[MAP5]](%[[IV0]], %[[M]])[%[[M]]] // CHECK: %[[N1:.+]] = tensor.dim %[[ARG0]], %[[C1]] // CHECK: %[[ST_ARG0:.+]] = tensor.extract_slice %[[ARG0]][%[[IV0]], 0] // CHECK-SAME: [%[[TILE_M_3]], %[[N1]]] @@ -57,10 +57,10 @@ // CHECK: %[[YIELD1:.+]] = scf.for %[[IV2:[a-zA-Z0-9]+]] = // CHECK-SAME: %[[C0]] to %[[N2]] step %[[C16]] // CHECK-SAME: iter_args(%[[ARG10:.+]] = %[[ARG8]]) -> (tensor) { -// CHECK: %[[TILE_N2:.+]] = affine.min #[[MAP2]](%[[IV2]])[%[[N2]]] +// CHECK: %[[TILE_N2:.+]] = affine.min #[[MAP2]](%[[IV2]], %[[N2]]) // CHECK: %[[ST_LHS:.+]] = tensor.extract_slice %[[LHS]][0, %[[IV2]]] // CHECK-SAME: [%[[TILE_M_3]], %[[TILE_N2]]] -// CHECK: %[[TILE_N3:.+]] = affine.min #[[MAP3]](%[[IV1]])[%[[N3_2]]] +// CHECK: %[[TILE_N3:.+]] = affine.min #[[MAP3]](%[[IV1]], %[[N3_2]]) // CHECK: %[[ST_ARG3:.+]] = tensor.extract_slice %[[ARG3]][%[[IV2]], %[[IV1]]] // CHECK-SAME: [%[[TILE_N2]], %[[TILE_N3]]] // CHECK: %[[M_4:.+]] = tensor.dim %[[ARG10]], %[[C0]] diff --git a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir --- a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir +++ b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir @@ -200,15 +200,15 @@ } // CHECK: #[[BOUND8_MAP:.+]] = affine_map<(d0)[s0] -> (8, -d0 + s0)> -// CHECK: #[[BOUND8_MAP_2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s0, 8, -d0 + s1)> +// CHECK: #[[BOUND8_MAP_2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, 8, -d0 + s0)> // CHECK: #[[BOUND16_MAP:.+]] = affine_map<(d0)[s0] -> (16, -d0 + s0)> // CHECK: #[[X2_MAP:.+]] = affine_map<(d0) -> (d0 * 2)> // CHECK: #[[INPUT_BOUND:.+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * 2 + s0 - 2, d1 * -2 + s0 + s1 * 2 - 2)> -// CHECK: #[[BOUND16_MAP_2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s0, 16, 
-d0 + s1)> +// CHECK: #[[BOUND16_MAP_2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, 16, -d0 + s0)> // CHECK: #[[BOUND4_MAP:.+]] = affine_map<(d0)[s0] -> (4, -d0 + s0)> // CHECK: #[[BOUND2_MAP:.+]] = affine_map<(d0)[s0] -> (2, -d0 + s0)> -// CHECK: #[[BOUND4_MAP_2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s0, 4, -d0 + s1)> -// CHECK: #[[BOUND2_MAP_2:.+]] = affine_map<(d0, d1)[s0, s1] -> (-d0 + s0, 2, -d1 + s1)> +// CHECK: #[[BOUND4_MAP_2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, 4, -d0 + s0)> +// CHECK: #[[BOUND2_MAP_2:.+]] = affine_map<(d0, d1)[s0, s1] -> (-d0 + s1, 2, -d1 + s0)> // CHECK: func @conv_tensors_dynamic // CHECK-SAME: (%[[INPUT]]: tensor, %[[FILTER]]: tensor, %[[ELEM]]: tensor) @@ -237,12 +237,12 @@ // CHECK: scf.for %[[IV0:.+]] = %{{.+}} to %[[ELEM_N]] step %{{.+}} iter_args(%{{.+}} = %[[FILL]]) // CHECK-NEXT: %[[SIZE_ELEM_N:.+]] = affine.min #[[BOUND8_MAP]](%[[IV0]])[%[[ELEM_N]]] -// CHECK-NEXT: %[[SIZE_INPUT_N:.+]] = affine.min #[[BOUND8_MAP_2]](%[[IV0]])[%[[INPUT_N]], %[[ELEM_N]]] +// CHECK-NEXT: %[[SIZE_INPUT_N:.+]] = affine.min #[[BOUND8_MAP_2]](%[[IV0]])[%[[ELEM_N]], %[[INPUT_N]]] // CHECK-NEXT: scf.for %[[IV1:.+]] = %{{.+}} to %[[ELEM_OH]] // CHECK-NEXT: %[[SIZE_ELEM_OH:.+]] = affine.min #[[BOUND16_MAP]](%[[IV1]])[%[[ELEM_OH]]] // CHECK-NEXT: %[[OFFSET_OH:.+]] = affine.apply #[[X2_MAP]](%[[IV1]]) // CHECK-NEXT: %[[SIZE_INPUT_H:.+]] = affine.min #[[INPUT_BOUND]](%[[SIZE_ELEM_OH]], %[[IV1]])[%[[FILTER_H]], %[[FILL_H]]] -// CHECK-NEXT: %[[SIZE_ELEM_OH_2:.+]] = affine.min #[[BOUND16_MAP_2]](%[[IV1]])[%[[FILL_H]], %[[ELEM_OH]]] +// CHECK-NEXT: %[[SIZE_ELEM_OH_2:.+]] = affine.min #[[BOUND16_MAP_2]](%[[IV1]])[%[[ELEM_OH]], %[[FILL_H]]] // CHECK-NEXT: scf.for %[[IV2:.+]] = %{{.+}} to %[[ELEM_OW]] // CHECK-NEXT: %[[SIZE_ELEM_OW:.+]] = affine.min #[[BOUND4_MAP]](%[[IV2]])[%[[ELEM_OW]]] // CHECK-NEXT: %[[SIZE_ELEM_OC:.+]] = affine.min #[[BOUND2_MAP]](%[[IV2]])[%[[ELEM_OC]]] @@ -250,13 +250,13 @@ // CHECK-NEXT: %[[SIZE_INPUT_W:.+]] = affine.min 
#[[INPUT_BOUND]](%[[SIZE_ELEM_OW]], %[[IV2]])[%[[FILTER_W]], %[[FILL_W]]] // CHECK-NEXT: %[[ST_INPUT:.+]] = tensor.extract_slice %[[INPUT]][%[[IV0]], %[[OFFSET_OH]], %[[OFFSET_OW]], 0] // CHECK-SAME: [%[[SIZE_INPUT_N]], %[[SIZE_INPUT_H]], %[[SIZE_INPUT_W]], %[[INPUT_C]]] -// CHECK-NEXT: %[[SIZE_ELEM_OW_2:.+]] = affine.min #[[BOUND4_MAP_2]](%[[IV2]])[%[[FILL_W]], %[[ELEM_OW]]] +// CHECK-NEXT: %[[SIZE_ELEM_OW_2:.+]] = affine.min #[[BOUND4_MAP_2]](%[[IV2]])[%[[ELEM_OW]], %[[FILL_W]]] // CHECK-NEXT: scf.for %[[IV3:.+]] = %{{.+}} to %[[ELEM_OC]] step %{{.+}} iter_args(%[[ARG:[a-z0-9]+]] // CHECK-NEXT: %[[ST_ELEM:.+]] = tensor.extract_slice %[[ELEM]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]] // CHECK-SAME: [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]] // CHECK-NEXT: %[[ST_ARG:.+]] = tensor.extract_slice %[[ARG]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]] // CHECK-SAME: [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]] -// CHECK-NEXT: %[[SIZE_ELEM_OC_2:.+]] = affine.min #[[BOUND2_MAP_2]](%[[IV3]], %[[IV2]])[%[[FILTER_OC]], %[[ELEM_OC]]] +// CHECK-NEXT: %[[SIZE_ELEM_OC_2:.+]] = affine.min #[[BOUND2_MAP_2]](%[[IV3]], %[[IV2]])[%[[ELEM_OC]], %[[FILTER_OC]]] // CHECK-NEXT: %[[ST_FILTER:.+]] = tensor.extract_slice %[[FILTER]][0, 0, 0, %[[IV3]]] // CHECK-SAME: [%[[FILTER_H]], %[[FILTER_W]], %[[FILTER_IC]], %[[SIZE_ELEM_OC_2]]] // CHECK-NEXT: %[[ST_FILL:.+]] = tensor.extract_slice %[[FILL]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]] diff --git a/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir b/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir --- a/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir +++ b/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,3" -cse -split-input-file | \ -// RUN: FileCheck %s -check-prefix=TILE2 +// R-UN: mlir-opt %s -linalg-tile="tile-sizes=2,3" -cse -split-input-file | \ +// R-UN: FileCheck %s -check-prefix=TILE2 // RUN: mlir-opt %s 
-linalg-tile="tile-sizes=0,3" -resolve-shaped-type-result-dims -cse -split-input-file | \ // RUN: FileCheck %s -check-prefix=TILE1 @@ -27,6 +27,7 @@ // TILE1-DAG: #[[MAP0:.*]] = affine_map<()[s0] -> (s0 + 7)> // TILE1-DAG: #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 8)> +// TILE1-DAG: #[[MAP2:.*]] = affine_map<(d0) -> (d0 + 8)> // TILE1: func @dynamic_pad_tensor( // TILE1-SAME: %[[IN:.*]]: tensor // TILE1-DAG: %[[C0:.*]] = arith.constant 0 : index @@ -37,12 +38,13 @@ // TILE1: %[[DIM_IN0:.*]] = tensor.dim %[[IN]], %[[C0]] // TILE1: %[[DIM0:.*]] = affine.apply #[[MAP1]]()[%[[DIM_IN0]]] // TILE1: %[[RESULT:.*]] = scf.for {{.*}} = %[[C0]] to %[[DIM1]] step %[[C3]] iter_args(%[[INNER_OUT:.*]] = +// TILE1: %[[DIM0_plus_8:.*]] = affine.apply #[[MAP2]](%[[DIM_IN0]]) // TILE1: %[[SWAP_RESULT:.*]] = scf.if // TILE1: tensor.generate // TILE1: else // TILE1: %[[SLICE:.*]] = tensor.extract_slice %[[IN]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1] // TILE1: %[[PAD:.*]] = linalg.pad_tensor %[[SLICE]] low[3, %{{.*}}] high[{{.*}}, {{.*}}] -// TILE1: tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][0, {{.*}}] [%[[DIM0]], {{.*}}] [1, 1] +// TILE1: tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][0, {{.*}}] [%[[DIM0_plus_8]], {{.*}}] [1, 1] // TILE1: return %[[RESULT]] func @dynamic_pad_tensor(%input_tensor: tensor,