diff --git a/mlir/include/mlir/Transforms/Passes.td b/mlir/include/mlir/Transforms/Passes.td
--- a/mlir/include/mlir/Transforms/Passes.td
+++ b/mlir/include/mlir/Transforms/Passes.td
@@ -29,6 +29,8 @@
   }];
   let constructor = "mlir::createCanonicalizerPass()";
   let options = [
+    Option<"cseConstants", "cse-constants", "bool", /*default=*/"true",
+           "Deduplicate and potentially hoist constants">,
     Option<"topDownProcessingEnabled", "top-down", "bool",
            /*default=*/"true",
            "Seed the worklist in general top-down order">,
diff --git a/mlir/lib/Transforms/Canonicalizer.cpp b/mlir/lib/Transforms/Canonicalizer.cpp
--- a/mlir/lib/Transforms/Canonicalizer.cpp
+++ b/mlir/lib/Transforms/Canonicalizer.cpp
@@ -13,7 +13,9 @@

 #include "mlir/Transforms/Passes.h"

+#include "mlir/IR/Matchers.h"
 #include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/FoldUtils.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

 namespace mlir {
@@ -62,6 +64,15 @@
     // Canonicalization is best-effort. Non-convergence is not a pass failure.
     if (testConvergence && failed(converged))
       signalPassFailure();
+
+    if (this->cseConstants) {
+      OperationFolder folder(getOperation()->getContext());
+      getOperation()->walk([&](Operation *op) {
+        Attribute constValue;
+        if (matchPattern(op, m_Constant(&constValue)))
+          (void)folder.insertKnownConstant(op, constValue);
+      });
+    }
   }

   FrozenRewritePatternSet patterns;
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -11,7 +11,6 @@
 //===----------------------------------------------------------------------===//

 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
-#include "mlir/IR/Matchers.h"
 #include "mlir/Interfaces/SideEffectInterfaces.h"
 #include "mlir/Rewrite/PatternApplicator.h"
 #include "mlir/Transforms/FoldUtils.h"
@@ -415,16 +414,6 @@
 }

 LogicalResult RegionPatternRewriteDriver::simplify() && {
-  auto insertKnownConstant = [&](Operation *op) {
-    // Check for existing constants when populating the worklist. This avoids
-    // accidentally reversing the constant order during processing.
-    Attribute constValue;
-    if (matchPattern(op, m_Constant(&constValue)))
-      if (!folder.insertKnownConstant(op, constValue))
-        return true;
-    return false;
-  };
-
   bool changed = false;
   int64_t iteration = 0;
   do {
@@ -439,17 +428,12 @@
     if (!config.useTopDownTraversal) {
       // Add operations to the worklist in postorder.
       region.walk([&](Operation *op) {
-        if (!insertKnownConstant(op))
-          addToWorklist(op);
+        addToWorklist(op);
       });
     } else {
      // Add all nested operations to the worklist in preorder.
       region.walk([&](Operation *op) {
-        if (!insertKnownConstant(op)) {
-          worklist.push_back(op);
-          return WalkResult::advance();
-        }
-        return WalkResult::skip();
+        worklist.push_back(op);
       });

       // Reverse the list so our pop-back loop processes them in-order.
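The driver previously seeded pre-existing constants into its OperationFolder while populating the worklist, which both deduplicated them and preserved their original order. With that logic removed, deduplication becomes an explicit post-pass walk in the canonicalizer, gated by the new `cse-constants` option (default: true). A minimal sketch of the intended effect — the function name is hypothetical, not taken from the patch's tests:

    // Input: two structurally identical constants.
    func.func @dedup_sketch() -> (i32, i32) {
      %0 = arith.constant 7 : i32
      %1 = arith.constant 7 : i32
      return %0, %1 : i32, i32
    }
    // With cse-constants=true (the default), the post-pass OperationFolder
    // walk merges the duplicates into a single constant with two uses, and
    // constants may be hoisted toward the start of their block. With
    // cse-constants=false, both constants are left untouched.

Because deduplicated constants can now be re-created in a different position and relative order, the test updates below replace ordered CHECK lines on constants with order-independent CHECK-DAG lines.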
diff --git a/mlir/test/Dialect/GPU/transform-gpu.mlir b/mlir/test/Dialect/GPU/transform-gpu.mlir
--- a/mlir/test/Dialect/GPU/transform-gpu.mlir
+++ b/mlir/test/Dialect/GPU/transform-gpu.mlir
@@ -50,10 +50,10 @@
   %c12 = arith.constant 12 : index
   %c9 = arith.constant 9 : index
   %c7 = arith.constant 7 : index
-// CHECK: %[[C1:.*]] = arith.constant 1 : index
-// CHECK: %[[C12:.*]] = arith.constant 12 : index
-// CHECK: %[[C9:.*]] = arith.constant 9 : index
-// CHECK: %[[C7:.*]] = arith.constant 7 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[C12:.*]] = arith.constant 12 : index
+// CHECK-DAG: %[[C9:.*]] = arith.constant 9 : index
+// CHECK-DAG: %[[C7:.*]] = arith.constant 7 : index
 // CHECK: gpu.launch async [%{{.*}}] blocks(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %[[C1]], %{{.*}} = %[[C1]], %{{.*}} = %[[C1]]) threads(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %[[C12]], %{{.*}} = %[[C9]], %{{.*}} = %[[C1]])
 // CHECK: %[[TIDX:.*]] = gpu.thread_id x
 // CHECK: %[[TIDY:.*]] = gpu.thread_id y
@@ -101,10 +101,10 @@
   %c32 = arith.constant 32 : index
   %c64 = arith.constant 64 : index
   %c4 = arith.constant 4 : index
-// CHECK: %[[C32:.*]] = arith.constant 32 : index
-// CHECK: %[[C64:.*]] = arith.constant 64 : index
-// CHECK: %[[C4:.*]] = arith.constant 4 : index
-// CHECK: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[C32:.*]] = arith.constant 32 : index
+// CHECK-DAG: %[[C64:.*]] = arith.constant 64 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK: gpu.launch blocks(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %[[C32]], %{{.*}} = %[[C64]], %{{.*}} = %[[C1]]) threads(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %[[C32]], %{{.*}} = %[[C4]], %{{.*}} = %[[C1]])
 // CHECK: %[[BLKX:.*]] = gpu.block_id x
 // CHECK: %[[BLKY:.*]] = gpu.block_id y
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -140,8 +140,8 @@
 }
 // CHECK: func @subview_negative_stride1
 // CHECK-SAME: (%[[ARG0:.*]]: memref)
-// CHECK: %[[C1:.*]] = arith.constant 0
-// CHECK: %[[C2:.*]] = arith.constant -1
+// CHECK-DAG: %[[C1:.*]] = arith.constant 0
+// CHECK-DAG: %[[C2:.*]] = arith.constant -1
 // CHECK: %[[DIM1:.*]] = memref.dim %[[ARG0]], %[[C1]] : memref
 // CHECK: %[[DIM2:.*]] = arith.addi %[[DIM1]], %[[C2]] : index
 // CHECK: %[[RES1:.*]] = memref.subview %[[ARG0]][%[[DIM2]]] [%[[DIM1]]] [-1] : memref to memref>
diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir
--- a/mlir/test/Dialect/SCF/canonicalize.mlir
+++ b/mlir/test/Dialect/SCF/canonicalize.mlir
@@ -1054,20 +1054,20 @@
   }
   return %0#0, %0#1, %0#2, %0#3, %0#4 : tensor, tensor, tensor, tensor, tensor
 }
-// CHECK: %[[ZERO:.*]] = arith.constant dense<0>
-// CHECK: %[[ONE:.*]] = arith.constant dense<1>
-// CHECK: %[[CST42:.*]] = arith.constant dense<42>
-// CHECK: %[[WHILE:.*]]:3 = scf.while (%[[ARG0:.*]] = %[[ZERO]], %[[ARG2:.*]] = %[[ONE]], %[[ARG3:.*]] = %[[ONE]])
-// CHECK: arith.cmpi slt, %[[ARG0]], %{{.*}}
-// CHECK: tensor.extract %{{.*}}[]
-// CHECK: scf.condition(%{{.*}}) %[[ARG0]], %[[ARG2]], %[[ARG3]]
-// CHECK: } do {
-// CHECK: ^{{.*}}(%[[ARG0:.*]]: tensor, %[[ARG2:.*]]: tensor, %[[ARG3:.*]]: tensor):
-// CHECK: %[[VAL0:.*]] = arith.addi %[[ARG0]], %[[FUNC_ARG0]]
-// CHECK: %[[VAL1:.*]] = arith.addi %[[ARG2]], %[[ARG3]]
-// CHECK: scf.yield %[[VAL0]], %[[VAL1]], %[[VAL1]]
-// CHECK: }
-// CHECK: return %[[WHILE]]#0, %[[FUNC_ARG0]], %[[WHILE]]#1, %[[WHILE]]#2, %[[ZERO]]
+// CHECK-DAG: %[[ZERO:.*]] = arith.constant dense<0>
+// CHECK-DAG: %[[ONE:.*]] = arith.constant dense<1>
+// CHECK-DAG: %[[CST42:.*]] = arith.constant dense<42>
+// CHECK: %[[WHILE:.*]]:3 = scf.while (%[[ARG0:.*]] = %[[ZERO]], %[[ARG2:.*]] = %[[ONE]], %[[ARG3:.*]] = %[[ONE]])
+// CHECK: arith.cmpi slt, %[[ARG0]], %{{.*}}
+// CHECK: tensor.extract %{{.*}}[]
+// CHECK: scf.condition(%{{.*}}) %[[ARG0]], %[[ARG2]], %[[ARG3]]
+// CHECK: } do {
+// CHECK: ^{{.*}}(%[[ARG0:.*]]: tensor, %[[ARG2:.*]]: tensor, %[[ARG3:.*]]: tensor):
+// CHECK: %[[VAL0:.*]] = arith.addi %[[ARG0]], %[[FUNC_ARG0]]
+// CHECK: %[[VAL1:.*]] = arith.addi %[[ARG2]], %[[ARG3]]
+// CHECK: scf.yield %[[VAL0]], %[[VAL1]], %[[VAL1]]
+// CHECK: }
+// CHECK: return %[[WHILE]]#0, %[[FUNC_ARG0]], %[[WHILE]]#1, %[[WHILE]]#2, %[[ZERO]]

 // CHECK-LABEL: @while_loop_invariant_argument_different_order
 func.func @while_loop_invariant_argument_different_order() -> (tensor, tensor, tensor, tensor, tensor, tensor) {
@@ -1087,18 +1087,18 @@
   }
   return %0#0, %0#1, %0#2, %0#3, %0#4, %0#5 : tensor, tensor, tensor, tensor, tensor, tensor
 }
-// CHECK: %[[ZERO:.*]] = arith.constant dense<0>
-// CHECK: %[[ONE:.*]] = arith.constant dense<1>
-// CHECK: %[[CST42:.*]] = arith.constant dense<42>
-// CHECK: %[[WHILE:.*]]:2 = scf.while (%[[ARG1:.*]] = %[[ONE]], %[[ARG4:.*]] = %[[ZERO]])
-// CHECK: arith.cmpi slt, %[[ZERO]], %[[CST42]]
-// CHECK: tensor.extract %{{.*}}[]
-// CHECK: scf.condition(%{{.*}}) %[[ARG1]], %[[ARG4]]
-// CHECK: } do {
-// CHECK: ^{{.*}}(%{{.*}}: tensor, %{{.*}}: tensor):
-// CHECK: scf.yield %[[ZERO]], %[[ONE]]
-// CHECK: }
-// CHECK: return %[[WHILE]]#0, %[[ZERO]], %[[ONE]], %[[ZERO]], %[[ONE]], %[[WHILE]]#1
+// CHECK-DAG: %[[ZERO:.*]] = arith.constant dense<0>
+// CHECK-DAG: %[[ONE:.*]] = arith.constant dense<1>
+// CHECK-DAG: %[[CST42:.*]] = arith.constant dense<42>
+// CHECK: %[[WHILE:.*]]:2 = scf.while (%[[ARG1:.*]] = %[[ONE]], %[[ARG4:.*]] = %[[ZERO]])
+// CHECK: arith.cmpi slt, %[[ZERO]], %[[CST42]]
+// CHECK: tensor.extract %{{.*}}[]
+// CHECK: scf.condition(%{{.*}}) %[[ARG1]], %[[ARG4]]
+// CHECK: } do {
+// CHECK: ^{{.*}}(%{{.*}}: tensor, %{{.*}}: tensor):
+// CHECK: scf.yield %[[ZERO]], %[[ONE]]
+// CHECK: }
+// CHECK: return %[[WHILE]]#0, %[[ZERO]], %[[ONE]], %[[ZERO]], %[[ONE]], %[[WHILE]]#1

 // -----

diff --git a/mlir/test/Dialect/SCF/loop-pipelining.mlir b/mlir/test/Dialect/SCF/loop-pipelining.mlir
--- a/mlir/test/Dialect/SCF/loop-pipelining.mlir
+++ b/mlir/test/Dialect/SCF/loop-pipelining.mlir
@@ -547,13 +547,13 @@

 // -----

 // CHECK: @pipeline_op_with_region(%[[ARG0:.+]]: memref, %[[ARG1:.+]]: memref, %[[ARG2:.+]]: memref, %[[CF:.*]]: f32) {
-// CHECK: %[[C0:.+]] = arith.constant 0 :
-// CHECK: %[[C3:.+]] = arith.constant 3 :
-// CHECK: %[[C1:.+]] = arith.constant 1 :
-// CHECK: %[[APRO:.+]] = memref.alloc() :
-// CHECK: %[[BPRO:.+]] = memref.alloc() :
-// CHECK: %[[ASV0:.+]] = memref.subview %[[ARG0]][%[[C0]]] [8] [1] :
-// CHECK: %[[BSV0:.+]] = memref.subview %[[ARG1]][%[[C0]]] [8] [1] :
+// CHECK-DAG: %[[C0:.+]] = arith.constant 0 :
+// CHECK-DAG: %[[C3:.+]] = arith.constant 3 :
+// CHECK-DAG: %[[C1:.+]] = arith.constant 1 :
+// CHECK: %[[APRO:.+]] = memref.alloc() :
+// CHECK: %[[BPRO:.+]] = memref.alloc() :
+// CHECK: %[[ASV0:.+]] = memref.subview %[[ARG0]][%[[C0]]] [8] [1] :
+// CHECK: %[[BSV0:.+]] = memref.subview %[[ARG1]][%[[C0]]] [8] [1] :
 // Prologue:
 // CHECK: %[[PAV0:.+]] = memref.subview %[[APRO]][%[[C0]], 0] [1, 8] [1, 1] :
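The remaining test updates all make the same mechanical change: constants that used to be matched in a fixed order are now matched with CHECK-DAG. For reference, a generic illustration of the FileCheck semantics being relied on (not a line from this patch) — consecutive CHECK-DAG directives may match in any order, while a plain CHECK line acts as a barrier they cannot cross:

    // CHECK-DAG: %[[A:.*]] = arith.constant 0 : index
    // CHECK-DAG: %[[B:.*]] = arith.constant 1 : index
    // CHECK:     arith.addi %[[A]], %[[B]] : index

Either ordering of the two constants in the output satisfies the CHECK-DAG pair, but the addi must appear after both of them.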
diff --git a/mlir/test/Dialect/SparseTensor/buffer_rewriting.mlir b/mlir/test/Dialect/SparseTensor/buffer_rewriting.mlir
--- a/mlir/test/Dialect/SparseTensor/buffer_rewriting.mlir
+++ b/mlir/test/Dialect/SparseTensor/buffer_rewriting.mlir
@@ -101,8 +101,8 @@
 // CHECK-SAME: %[[X0:.*]]: memref,
 // CHECK-SAME: %[[Y0:.*]]: memref,
 // CHECK-SAME: %[[Y1:.*]]: memref) -> index {
-// CHECK: %[[C1:.*]] = arith.constant 1
-// CHECK: %[[VAL_6:.*]] = arith.constant -
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1
+// CHECK-DAG: %[[VAL_6:.*]] = arith.constant -
 // CHECK: %[[SUM:.*]] = arith.addi %[[L]], %[[H]]
 // CHECK: %[[P:.*]] = arith.shrui %[[SUM]], %[[C1]]
 // CHECK: %[[J:.*]] = arith.subi %[[H]], %[[C1]]
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -312,8 +312,8 @@

 // CHECK-LABEL: func.func @sparse_alloc_csc(
 // CHECK-SAME: %[[A0:.*]]: index) -> (memref, memref, memref, !sparse_tensor.storage_specifier
-// CHECK: %[[A1:.*]] = arith.constant 10 : i64
-// CHECK: %[[A2:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[A1:.*]] = arith.constant 10 : i64
+// CHECK-DAG: %[[A2:.*]] = arith.constant 0 : index
 // CHECK: %[[A3:.*]] = memref.alloc() : memref<16xindex>
 // CHECK: %[[A4:.*]] = memref.cast %[[A3]] : memref<16xindex> to memref
 // CHECK: %[[A5:.*]] = memref.alloc() : memref<16xindex>
@@ -340,11 +340,11 @@
 }

 // CHECK-LABEL: func.func @sparse_alloc_3d() -> (memref, !sparse_tensor.storage_specifier
-// CHECK: %[[A0:.*]] = arith.constant 6000 : index
-// CHECK: %[[A1:.*]] = arith.constant 20 : i64
-// CHECK: %[[A2:.*]] = arith.constant 10 : i64
-// CHECK: %[[A3:.*]] = arith.constant 30 : i64
-// CHECK: %[[A4:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: %[[A0:.*]] = arith.constant 6000 : index
+// CHECK-DAG: %[[A1:.*]] = arith.constant 20 : i64
+// CHECK-DAG: %[[A2:.*]] = arith.constant 10 : i64
+// CHECK-DAG: %[[A3:.*]] = arith.constant 30 : i64
+// CHECK-DAG: %[[A4:.*]] = arith.constant 0.000000e+00 : f64
 // CHECK: %[[A5:.*]] = memref.alloc() : memref<6000xf64>
 // CHECK: %[[A6:.*]] = memref.cast %[[A5]] : memref<6000xf64> to memref
 // CHECK: %[[A7:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier
@@ -473,11 +473,11 @@
 // CHECK-SAME: %[[A6:.*6]]: memref,
 // CHECK-SAME: %[[A7:.*7]]: index,
 // CHECK-SAME: %[[A8:.*8]]: index) -> (memref, memref, memref, !sparse_tensor.storage_specifier
-// CHECK: %[[A9:.*]] = arith.constant 0 : i32
-// CHECK: %[[A10:.*]] = arith.constant false
-// CHECK: %[[A11:.*]] = arith.constant 0.000000e+00 : f64
-// CHECK: %[[A12:.*]] = arith.constant 1 : index
-// CHECK: %[[A13:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[A9:.*]] = arith.constant 0 : i32
+// CHECK-DAG: %[[A10:.*]] = arith.constant false
+// CHECK-DAG: %[[A11:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: %[[A12:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[A13:.*]] = arith.constant 0 : index
 // CHECK: sparse_tensor.sort %[[A7]], %[[A6]] : memref
 // CHECK: %[[A14:.*]]:4 = scf.for %[[A15:.*]] = %[[A13]] to %[[A7]] step %[[A12]] iter_args(%[[A16:.*]] = %[[A0]], %[[A17:.*]] = %[[A1]], %[[A18:.*]] = %[[A2]], %[[A19:.*]] = %[[A3]]) -> (memref, memref, memref, !sparse_tensor.storage_specifier
 // CHECK: %[[A20:.*]] = memref.load %[[A6]]{{\[}}%[[A15]]] : memref
@@ -534,10 +534,10 @@
 // CHECK-SAME: %[[A6:.*6]]: memref,
 // CHECK-SAME: %[[A7:.*7]]: index,
 // CHECK-SAME: %[[A8:.*8]]: index) -> (memref, memref, memref, !sparse_tensor.storage_specifier
-// CHECK: %[[A9:.*]] = arith.constant false
-// CHECK: %[[A10:.*]] = arith.constant 0.000000e+00 : f64
-// CHECK: %[[A11:.*]] = arith.constant 0 : index
-// CHECK: %[[A12:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[A9:.*]] = arith.constant false
+// CHECK-DAG: %[[A10:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: %[[A11:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[A12:.*]] = arith.constant 1 : index
 // CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref, memref, memref, !sparse_tensor.storage_specifier
 // CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref
 // CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref
diff --git a/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir b/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir
--- a/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir
@@ -4,9 +4,9 @@

 // CHECK-LABEL: func.func @sparse_alloc_sparse_vector(
 // CHECK-SAME: %[[VAL_0:.*]]: index) -> (memref, memref, memref, !sparse_tensor.storage_specifier
-// CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f64
-// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
 // CHECK: %[[VAL_4:.*]] = memref.alloc() : memref<16xindex>
 // CHECK: %[[VAL_5:.*]] = memref.cast %[[VAL_4]] : memref<16xindex> to memref
 // CHECK: linalg.fill ins(%[[VAL_3]] : index) outs(%[[VAL_4]] : memref<16xindex>)
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -401,7 +401,7 @@
 // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> {
 // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex
+// CHECK-DAG: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex
 // CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
 // CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref
 // CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref
diff --git a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
@@ -64,13 +64,13 @@
 // CHECK-SAME: %[[VAL_5:.*5]]: memref,
 // CHECK-SAME: %[[VAL_6:.*6]]: memref,
 // CHECK-SAME: %[[VAL_7:.*7]]: !sparse_tensor.storage_specifier
-// CHECK: %[[VAL_8:.*]] = arith.constant 4 : index
-// CHECK: %[[VAL_9:.*]] = arith.constant 4 : i64
-// CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f64
-// CHECK: %[[VAL_11:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_12:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_13:.*]] = arith.constant false
-// CHECK: %[[VAL_14:.*]] = arith.constant true
+// CHECK-DAG: %[[VAL_8:.*]] = arith.constant 4 : index
+// CHECK-DAG: %[[VAL_9:.*]] = arith.constant 4 : i64
+// CHECK-DAG: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_12:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_13:.*]] = arith.constant false
+// CHECK-DAG: %[[VAL_14:.*]] = arith.constant true
 // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<16xindex>
 // CHECK: %[[VAL_16:.*]] = memref.cast %[[VAL_15]] : memref<16xindex> to memref
 // CHECK: %[[VAL_17:.*]] = memref.alloc() : memref<16xindex>
diff --git a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
--- a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
@@ -71,8 +71,8 @@

 // CHECK-LABEL: func @transfer_read_flattenable_with_dynamic_dims_and_indices
 // CHECK-SAME: %[[ARG0:.+]]: memref, %[[ARG1:.+]]: index, %[[ARG2:.+]]: index
-// CHECK: %[[C0_I8:.+]] = arith.constant 0 : i8
-// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG: %[[C0_I8:.+]] = arith.constant 0 : i8
+// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[ARG0]] {{\[}}[0], [1], [2, 3]{{\]}}
 // CHECK-SAME: : memref into memref
 // CHECK: %[[VEC1D:.+]] = vector.transfer_read %[[COLLAPSED]]
diff --git a/mlir/test/Transforms/test-operation-folder.mlir b/mlir/test/Transforms/test-operation-folder.mlir
--- a/mlir/test/Transforms/test-operation-folder.mlir
+++ b/mlir/test/Transforms/test-operation-folder.mlir
@@ -23,14 +23,3 @@
   %1 = "test.cast"() {test_fold_before_previously_folded_op} : () -> (i32)
   return %0, %1 : i32, i32
 }
-
-func.func @test_dont_reorder_constants() -> (i32, i32, i32) {
-  // Test that we don't reorder existing constants during folding if it isn't necessary.
-  // CHECK: %[[CST:.+]] = arith.constant 1
-  // CHECK-NEXT: %[[CST:.+]] = arith.constant 2
-  // CHECK-NEXT: %[[CST:.+]] = arith.constant 3
-  %0 = arith.constant 1 : i32
-  %1 = arith.constant 2 : i32
-  %2 = arith.constant 3 : i32
-  return %0, %1, %2 : i32, i32, i32
-}
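The deleted test pinned down a guarantee — folding does not reorder pre-existing constants — that the driver no longer provides once the insertKnownConstant seeding is gone. If equivalent coverage were still wanted, an order-independent variant could look like the following sketch (hypothetical, not part of the patch):

    func.func @test_constants_survive_folding() -> (i32, i32, i32) {
      // All three constants must survive folding, in any relative order.
      // CHECK-DAG: arith.constant 1
      // CHECK-DAG: arith.constant 2
      // CHECK-DAG: arith.constant 3
      %0 = arith.constant 1 : i32
      %1 = arith.constant 2 : i32
      %2 = arith.constant 3 : i32
      return %0, %1, %2 : i32, i32, i32
    }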