diff --git a/mlir/test/Analysis/test-alias-analysis.mlir b/mlir/test/Analysis/test-alias-analysis.mlir --- a/mlir/test/Analysis/test-alias-analysis.mlir +++ b/mlir/test/Analysis/test-alias-analysis.mlir @@ -52,10 +52,10 @@ %1 = memref.alloca() {test.ptr = "alloca_2"} : memref<8x64xf32> %2 = memref.alloc() {test.ptr = "alloc_1"} : memref<8x64xf32> - cond_br %cond, ^bb1(%0 : memref<8x64xf32>), ^bb2(%0 : memref<8x64xf32>) + cf.cond_br %cond, ^bb1(%0 : memref<8x64xf32>), ^bb2(%0 : memref<8x64xf32>) ^bb1(%arg1: memref<8x64xf32>): - br ^bb2(%arg1 : memref<8x64xf32>) + cf.br ^bb2(%arg1 : memref<8x64xf32>) ^bb2(%arg2: memref<8x64xf32>): return @@ -85,10 +85,10 @@ %1 = memref.alloca() {test.ptr = "alloca_2"} : memref<8x64xf32> %2 = memref.alloc() {test.ptr = "alloc_1"} : memref<8x64xf32> - cond_br %cond, ^bb1(%0 : memref<8x64xf32>), ^bb2(%2 : memref<8x64xf32>) + cf.cond_br %cond, ^bb1(%0 : memref<8x64xf32>), ^bb2(%2 : memref<8x64xf32>) ^bb1(%arg1: memref<8x64xf32>): - br ^bb2(%arg1 : memref<8x64xf32>) + cf.br ^bb2(%arg1 : memref<8x64xf32>) ^bb2(%arg2: memref<8x64xf32>): return diff --git a/mlir/test/Analysis/test-dominance.mlir b/mlir/test/Analysis/test-dominance.mlir --- a/mlir/test/Analysis/test-dominance.mlir +++ b/mlir/test/Analysis/test-dominance.mlir @@ -2,11 +2,11 @@ // CHECK-LABEL: Testing : func_condBranch func @func_condBranch(%cond : i1) { - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: - br ^exit + cf.br ^exit ^bb2: - br ^exit + cf.br ^exit ^exit: return } @@ -49,14 +49,14 @@ // CHECK-LABEL: Testing : func_loop func @func_loop(%arg0 : i32, %arg1 : i32) { - br ^loopHeader(%arg0 : i32) + cf.br ^loopHeader(%arg0 : i32) ^loopHeader(%counter : i32): %lessThan = arith.cmpi slt, %counter, %arg1 : i32 - cond_br %lessThan, ^loopBody, ^exit + cf.cond_br %lessThan, ^loopBody, ^exit ^loopBody: %const0 = arith.constant 1 : i32 %inc = arith.addi %counter, %const0 : i32 - br ^loopHeader(%inc : i32) + cf.br ^loopHeader(%inc : i32) ^exit: return } @@ -153,17 
+153,17 @@ %arg2 : index, %arg3 : index, %arg4 : index) { - br ^loopHeader(%arg0 : i32) + cf.br ^loopHeader(%arg0 : i32) ^loopHeader(%counter : i32): %lessThan = arith.cmpi slt, %counter, %arg1 : i32 - cond_br %lessThan, ^loopBody, ^exit + cf.cond_br %lessThan, ^loopBody, ^exit ^loopBody: %const0 = arith.constant 1 : i32 %inc = arith.addi %counter, %const0 : i32 scf.for %arg5 = %arg2 to %arg3 step %arg4 { scf.for %arg6 = %arg2 to %arg3 step %arg4 { } } - br ^loopHeader(%inc : i32) + cf.br ^loopHeader(%inc : i32) ^exit: return } diff --git a/mlir/test/Analysis/test-liveness.mlir b/mlir/test/Analysis/test-liveness.mlir --- a/mlir/test/Analysis/test-liveness.mlir +++ b/mlir/test/Analysis/test-liveness.mlir @@ -19,7 +19,7 @@ // CHECK-NEXT: LiveOut: arg0@0 arg1@0 // CHECK-NEXT: BeginLiveness // CHECK-NEXT: EndLiveness - br ^exit + cf.br ^exit ^exit: // CHECK: Block: 1 // CHECK-NEXT: LiveIn: arg0@0 arg1@0 @@ -42,17 +42,17 @@ // CHECK-NEXT: LiveOut: arg1@0 arg2@0 // CHECK-NEXT: BeginLiveness // CHECK-NEXT: EndLiveness - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: // CHECK: Block: 1 // CHECK-NEXT: LiveIn: arg1@0 arg2@0 // CHECK-NEXT: LiveOut: arg1@0 arg2@0 - br ^exit + cf.br ^exit ^bb2: // CHECK: Block: 2 // CHECK-NEXT: LiveIn: arg1@0 arg2@0 // CHECK-NEXT: LiveOut: arg1@0 arg2@0 - br ^exit + cf.br ^exit ^exit: // CHECK: Block: 3 // CHECK-NEXT: LiveIn: arg1@0 arg2@0 @@ -74,7 +74,7 @@ // CHECK-NEXT: LiveIn:{{ *$}} // CHECK-NEXT: LiveOut: arg1@0 %const0 = arith.constant 0 : i32 - br ^loopHeader(%const0, %arg0 : i32, i32) + cf.br ^loopHeader(%const0, %arg0 : i32, i32) ^loopHeader(%counter : i32, %i : i32): // CHECK: Block: 1 // CHECK-NEXT: LiveIn: arg1@0 @@ -82,10 +82,10 @@ // CHECK-NEXT: BeginLiveness // CHECK-NEXT: val_5 // CHECK-NEXT: %2 = arith.cmpi - // CHECK-NEXT: cond_br + // CHECK-NEXT: cf.cond_br // CHECK-NEXT: EndLiveness %lessThan = arith.cmpi slt, %counter, %arg1 : i32 - cond_br %lessThan, ^loopBody(%i : i32), ^exit(%i : i32) + cf.cond_br 
%lessThan, ^loopBody(%i : i32), ^exit(%i : i32) ^loopBody(%val : i32): // CHECK: Block: 2 // CHECK-NEXT: LiveIn: arg1@0 arg0@1 @@ -98,12 +98,12 @@ // CHECK-NEXT: val_8 // CHECK-NEXT: %4 = arith.addi // CHECK-NEXT: %5 = arith.addi - // CHECK-NEXT: br + // CHECK-NEXT: cf.br // CHECK: EndLiveness %const1 = arith.constant 1 : i32 %inc = arith.addi %val, %const1 : i32 %inc2 = arith.addi %counter, %const1 : i32 - br ^loopHeader(%inc, %inc2 : i32, i32) + cf.br ^loopHeader(%inc, %inc2 : i32, i32) ^exit(%sum : i32): // CHECK: Block: 3 // CHECK-NEXT: LiveIn: arg1@0 @@ -147,14 +147,14 @@ // CHECK-NEXT: val_9 // CHECK-NEXT: %4 = arith.muli // CHECK-NEXT: %5 = arith.addi - // CHECK-NEXT: cond_br + // CHECK-NEXT: cf.cond_br // CHECK-NEXT: %c // CHECK-NEXT: %6 = arith.muli // CHECK-NEXT: %7 = arith.muli // CHECK-NEXT: %8 = arith.addi // CHECK-NEXT: val_10 // CHECK-NEXT: %5 = arith.addi - // CHECK-NEXT: cond_br + // CHECK-NEXT: cf.cond_br // CHECK-NEXT: %7 // CHECK: EndLiveness %0 = arith.addi %arg1, %arg2 : i32 @@ -164,7 +164,7 @@ %3 = arith.muli %0, %1 : i32 %4 = arith.muli %3, %2 : i32 %5 = arith.addi %4, %const1 : i32 - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: // CHECK: Block: 1 @@ -172,7 +172,7 @@ // CHECK-NEXT: LiveOut: arg2@0 %const4 = arith.constant 4 : i32 %6 = arith.muli %4, %const4 : i32 - br ^exit(%6 : i32) + cf.br ^exit(%6 : i32) ^bb2: // CHECK: Block: 2 @@ -180,7 +180,7 @@ // CHECK-NEXT: LiveOut: arg2@0 %7 = arith.muli %4, %5 : i32 %8 = arith.addi %4, %arg2 : i32 - br ^exit(%8 : i32) + cf.br ^exit(%8 : i32) ^exit(%sum : i32): // CHECK: Block: 3 @@ -284,7 +284,7 @@ // CHECK-NEXT: %0 = arith.addi // CHECK-NEXT: %1 = arith.addi // CHECK-NEXT: scf.for - // CHECK: // br ^bb1 + // CHECK: // cf.br ^bb1 // CHECK-NEXT: %2 = arith.addi // CHECK-NEXT: scf.for // CHECK: // %2 = arith.addi @@ -301,7 +301,7 @@ %2 = arith.addi %0, %arg5 : i32 memref.store %2, %buffer[] : memref } - br ^exit + cf.br ^exit ^exit: // CHECK: Block: 2 diff --git 
a/mlir/test/CAPI/ir.c b/mlir/test/CAPI/ir.c --- a/mlir/test/CAPI/ir.c +++ b/mlir/test/CAPI/ir.c @@ -1531,10 +1531,10 @@ fprintf(stderr, "@registration\n"); // CHECK-LABEL: @registration - // CHECK: std.cond_br is_registered: 1 - fprintf(stderr, "std.cond_br is_registered: %d\n", + // CHECK: cf.cond_br is_registered: 1 + fprintf(stderr, "cf.cond_br is_registered: %d\n", mlirContextIsRegisteredOperation( - ctx, mlirStringRefCreateFromCString("std.cond_br"))); + ctx, mlirStringRefCreateFromCString("cf.cond_br"))); // CHECK: std.not_existing_op is_registered: 0 fprintf(stderr, "std.not_existing_op is_registered: %d\n", diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir --- a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir +++ b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir @@ -27,7 +27,7 @@ // CHECK: %[[IS_ERROR:.*]] = call @mlirAsyncRuntimeIsTokenError(%[[TOKEN]]) // CHECK: %[[TRUE:.*]] = arith.constant true // CHECK: %[[NOT_ERROR:.*]] = arith.xori %[[IS_ERROR]], %[[TRUE]] : i1 - // CHECK: assert %[[NOT_ERROR]] + // CHECK: cf.assert %[[NOT_ERROR]] // CHECK-NEXT: return async.await %token : !async.token return @@ -90,7 +90,7 @@ // CHECK: %[[IS_ERROR:.*]] = call @mlirAsyncRuntimeIsTokenError(%[[TOKEN]]) // CHECK: %[[TRUE:.*]] = arith.constant true // CHECK: %[[NOT_ERROR:.*]] = arith.xori %[[IS_ERROR]], %[[TRUE]] : i1 - // CHECK: assert %[[NOT_ERROR]] + // CHECK: cf.assert %[[NOT_ERROR]] async.await %token0 : !async.token return } diff --git a/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir b/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir @@ -0,0 +1,41 @@ +// RUN: mlir-opt -split-input-file -convert-std-to-spirv -verify-diagnostics %s | FileCheck %s + +//===----------------------------------------------------------------------===// +// cf.br, cf.cond_br 
+//===----------------------------------------------------------------------===// + +module attributes { + spv.target_env = #spv.target_env<#spv.vce, {}> +} { + +// CHECK-LABEL: func @simple_loop +func @simple_loop(index, index, index) { +^bb0(%begin : index, %end : index, %step : index): +// CHECK-NEXT: spv.Branch ^bb1 + cf.br ^bb1 + +// CHECK-NEXT: ^bb1: // pred: ^bb0 +// CHECK-NEXT: spv.Branch ^bb2({{.*}} : i32) +^bb1: // pred: ^bb0 + cf.br ^bb2(%begin : index) + +// CHECK: ^bb2({{.*}}: i32): // 2 preds: ^bb1, ^bb3 +// CHECK-NEXT: {{.*}} = spv.SLessThan {{.*}}, {{.*}} : i32 +// CHECK-NEXT: spv.BranchConditional {{.*}}, ^bb3, ^bb4 +^bb2(%0: index): // 2 preds: ^bb1, ^bb3 + %1 = arith.cmpi slt, %0, %end : index + cf.cond_br %1, ^bb3, ^bb4 + +// CHECK: ^bb3: // pred: ^bb2 +// CHECK-NEXT: {{.*}} = spv.IAdd {{.*}}, {{.*}} : i32 +// CHECK-NEXT: spv.Branch ^bb2({{.*}} : i32) +^bb3: // pred: ^bb2 + %2 = arith.addi %0, %step : index + cf.br ^bb2(%2 : index) + +// CHECK: ^bb4: // pred: ^bb2 +^bb4: // pred: ^bb2 + return +} + +} diff --git a/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir --- a/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir @@ -168,16 +168,16 @@ %c128 = arith.constant 128 : index %c32 = arith.constant 32 : index %0 = gpu.subgroup_mma_load_matrix %arg2[%c0, %c0] {leadDimension = 128 : index} : memref<128x128xf16> -> !gpu.mma_matrix<16x16xf16, "COp"> - br ^bb1(%c0, %0 : index, !gpu.mma_matrix<16x16xf16, "COp">) + cf.br ^bb1(%c0, %0 : index, !gpu.mma_matrix<16x16xf16, "COp">) ^bb1(%1: index, %2: !gpu.mma_matrix<16x16xf16, "COp">): // 2 preds: ^bb0, ^bb2 %3 = arith.cmpi slt, %1, %c128 : index - cond_br %3, ^bb2, ^bb3 + cf.cond_br %3, ^bb2, ^bb3 ^bb2: // pred: ^bb1 %4 = gpu.subgroup_mma_load_matrix %arg0[%c0, %1] {leadDimension = 128 : index} : memref<128x128xf16> -> !gpu.mma_matrix<16x16xf16, "AOp"> %5 = gpu.subgroup_mma_load_matrix 
%arg1[%1, %c0] {leadDimension = 128 : index} : memref<128x128xf16> -> !gpu.mma_matrix<16x16xf16, "BOp"> %6 = gpu.subgroup_mma_compute %4, %5, %2 : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp"> %7 = arith.addi %1, %c32 : index - br ^bb1(%7, %6 : index, !gpu.mma_matrix<16x16xf16, "COp">) + cf.br ^bb1(%7, %6 : index, !gpu.mma_matrix<16x16xf16, "COp">) ^bb3: // pred: ^bb1 gpu.subgroup_mma_store_matrix %2, %arg2[%c0, %c0] {leadDimension = 128 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<128x128xf16> return diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir --- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir +++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir @@ -22,17 +22,17 @@ // CHECK: omp.parallel omp.parallel { // CHECK-NEXT: llvm.br ^[[BB1:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64 - br ^bb1(%start, %end : index, index) + cf.br ^bb1(%start, %end : index, index) // CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: i64, %[[ARG2:[0-9]+]]: i64):{{.*}} ^bb1(%0: index, %1: index): // CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : i64 %2 = arith.cmpi slt, %0, %1 : index // CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64), ^[[BB3:.*]] - cond_br %2, ^bb2(%end, %end : index, index), ^bb3 + cf.cond_br %2, ^bb2(%end, %end : index, index), ^bb3 // CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: i64, %[[ARG4:[0-9]+]]: i64): ^bb2(%3: index, %4: index): // CHECK-NEXT: llvm.br ^[[BB1]](%[[ARG3]], %[[ARG4]] : i64, i64) - br ^bb1(%3, %4 : index, index) + cf.br ^bb1(%3, %4 : index, index) // CHECK-NEXT: ^[[BB3]]: ^bb3: omp.flush diff --git a/mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir b/mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir rename from mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir rename to 
mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir --- a/mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir +++ b/mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir @@ -1,14 +1,14 @@ -// RUN: mlir-opt -allow-unregistered-dialect -convert-scf-to-std %s | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect -convert-scf-to-cf %s | FileCheck %s // CHECK-LABEL: func @simple_std_for_loop(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index) { -// CHECK-NEXT: br ^bb1(%{{.*}} : index) +// CHECK-NEXT: cf.br ^bb1(%{{.*}} : index) // CHECK-NEXT: ^bb1(%{{.*}}: index): // 2 preds: ^bb0, ^bb2 // CHECK-NEXT: %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}} : index -// CHECK-NEXT: cond_br %{{.*}}, ^bb2, ^bb3 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb2, ^bb3 // CHECK-NEXT: ^bb2: // pred: ^bb1 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index // CHECK-NEXT: %[[iv:.*]] = arith.addi %{{.*}}, %{{.*}} : index -// CHECK-NEXT: br ^bb1(%[[iv]] : index) +// CHECK-NEXT: cf.br ^bb1(%[[iv]] : index) // CHECK-NEXT: ^bb3: // pred: ^bb1 // CHECK-NEXT: return func @simple_std_for_loop(%arg0 : index, %arg1 : index, %arg2 : index) { @@ -19,23 +19,23 @@ } // CHECK-LABEL: func @simple_std_2_for_loops(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index) { -// CHECK-NEXT: br ^bb1(%{{.*}} : index) +// CHECK-NEXT: cf.br ^bb1(%{{.*}} : index) // CHECK-NEXT: ^bb1(%[[ub0:.*]]: index): // 2 preds: ^bb0, ^bb5 // CHECK-NEXT: %[[cond0:.*]] = arith.cmpi slt, %[[ub0]], %{{.*}} : index -// CHECK-NEXT: cond_br %[[cond0]], ^bb2, ^bb6 +// CHECK-NEXT: cf.cond_br %[[cond0]], ^bb2, ^bb6 // CHECK-NEXT: ^bb2: // pred: ^bb1 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb3(%{{.*}} : index) +// CHECK-NEXT: cf.br ^bb3(%{{.*}} : index) // CHECK-NEXT: ^bb3(%[[ub1:.*]]: index): // 2 preds: ^bb2, ^bb4 // CHECK-NEXT: %[[cond1:.*]] = arith.cmpi slt, %{{.*}}, %{{.*}} : index -// CHECK-NEXT: cond_br %[[cond1]], ^bb4, ^bb5 +// CHECK-NEXT: cf.cond_br %[[cond1]], ^bb4, ^bb5 // CHECK-NEXT: ^bb4: // pred: ^bb3 
// CHECK-NEXT: %{{.*}} = arith.constant 1 : index // CHECK-NEXT: %[[iv1:.*]] = arith.addi %{{.*}}, %{{.*}} : index -// CHECK-NEXT: br ^bb3(%[[iv1]] : index) +// CHECK-NEXT: cf.br ^bb3(%[[iv1]] : index) // CHECK-NEXT: ^bb5: // pred: ^bb3 // CHECK-NEXT: %[[iv0:.*]] = arith.addi %{{.*}}, %{{.*}} : index -// CHECK-NEXT: br ^bb1(%[[iv0]] : index) +// CHECK-NEXT: cf.br ^bb1(%[[iv0]] : index) // CHECK-NEXT: ^bb6: // pred: ^bb1 // CHECK-NEXT: return func @simple_std_2_for_loops(%arg0 : index, %arg1 : index, %arg2 : index) { @@ -49,10 +49,10 @@ } // CHECK-LABEL: func @simple_std_if(%{{.*}}: i1) { -// CHECK-NEXT: cond_br %{{.*}}, ^bb1, ^bb2 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb2 +// CHECK-NEXT: cf.br ^bb2 // CHECK-NEXT: ^bb2: // 2 preds: ^bb0, ^bb1 // CHECK-NEXT: return func @simple_std_if(%arg0: i1) { @@ -63,13 +63,13 @@ } // CHECK-LABEL: func @simple_std_if_else(%{{.*}}: i1) { -// CHECK-NEXT: cond_br %{{.*}}, ^bb1, ^bb2 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb3 +// CHECK-NEXT: cf.br ^bb3 // CHECK-NEXT: ^bb2: // pred: ^bb0 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb3 +// CHECK-NEXT: cf.br ^bb3 // CHECK-NEXT: ^bb3: // 2 preds: ^bb1, ^bb2 // CHECK-NEXT: return func @simple_std_if_else(%arg0: i1) { @@ -82,18 +82,18 @@ } // CHECK-LABEL: func @simple_std_2_ifs(%{{.*}}: i1) { -// CHECK-NEXT: cond_br %{{.*}}, ^bb1, ^bb5 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb1, ^bb5 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: cond_br %{{.*}}, ^bb2, ^bb3 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb2, ^bb3 // CHECK-NEXT: ^bb2: // pred: ^bb1 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb4 +// CHECK-NEXT: cf.br ^bb4 // CHECK-NEXT: ^bb3: // pred: ^bb1 // CHECK-NEXT: 
%{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb4 +// CHECK-NEXT: cf.br ^bb4 // CHECK-NEXT: ^bb4: // 2 preds: ^bb2, ^bb3 -// CHECK-NEXT: br ^bb5 +// CHECK-NEXT: cf.br ^bb5 // CHECK-NEXT: ^bb5: // 2 preds: ^bb0, ^bb4 // CHECK-NEXT: return func @simple_std_2_ifs(%arg0: i1) { @@ -109,27 +109,27 @@ } // CHECK-LABEL: func @simple_std_for_loop_with_2_ifs(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: i1) { -// CHECK-NEXT: br ^bb1(%{{.*}} : index) +// CHECK-NEXT: cf.br ^bb1(%{{.*}} : index) // CHECK-NEXT: ^bb1(%{{.*}}: index): // 2 preds: ^bb0, ^bb7 // CHECK-NEXT: %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}} : index -// CHECK-NEXT: cond_br %{{.*}}, ^bb2, ^bb8 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb2, ^bb8 // CHECK-NEXT: ^bb2: // pred: ^bb1 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: cond_br %{{.*}}, ^bb3, ^bb7 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb3, ^bb7 // CHECK-NEXT: ^bb3: // pred: ^bb2 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: cond_br %{{.*}}, ^bb4, ^bb5 +// CHECK-NEXT: cf.cond_br %{{.*}}, ^bb4, ^bb5 // CHECK-NEXT: ^bb4: // pred: ^bb3 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb6 +// CHECK-NEXT: cf.br ^bb6 // CHECK-NEXT: ^bb5: // pred: ^bb3 // CHECK-NEXT: %{{.*}} = arith.constant 1 : index -// CHECK-NEXT: br ^bb6 +// CHECK-NEXT: cf.br ^bb6 // CHECK-NEXT: ^bb6: // 2 preds: ^bb4, ^bb5 -// CHECK-NEXT: br ^bb7 +// CHECK-NEXT: cf.br ^bb7 // CHECK-NEXT: ^bb7: // 2 preds: ^bb2, ^bb6 // CHECK-NEXT: %[[iv0:.*]] = arith.addi %{{.*}}, %{{.*}} : index -// CHECK-NEXT: br ^bb1(%[[iv0]] : index) +// CHECK-NEXT: cf.br ^bb1(%[[iv0]] : index) // CHECK-NEXT: ^bb8: // pred: ^bb1 // CHECK-NEXT: return // CHECK-NEXT: } @@ -150,12 +150,12 @@ // CHECK-LABEL: func @simple_if_yield func @simple_if_yield(%arg0: i1) -> (i1, i1) { -// CHECK: cond_br %{{.*}}, ^[[then:.*]], ^[[else:.*]] +// CHECK: cf.cond_br %{{.*}}, ^[[then:.*]], ^[[else:.*]] %0:2 = scf.if %arg0 -> (i1, i1) { // CHECK: ^[[then]]: // CHECK: 
%[[v0:.*]] = arith.constant false // CHECK: %[[v1:.*]] = arith.constant true -// CHECK: br ^[[dom:.*]](%[[v0]], %[[v1]] : i1, i1) +// CHECK: cf.br ^[[dom:.*]](%[[v0]], %[[v1]] : i1, i1) %c0 = arith.constant false %c1 = arith.constant true scf.yield %c0, %c1 : i1, i1 @@ -163,13 +163,13 @@ // CHECK: ^[[else]]: // CHECK: %[[v2:.*]] = arith.constant false // CHECK: %[[v3:.*]] = arith.constant true -// CHECK: br ^[[dom]](%[[v3]], %[[v2]] : i1, i1) +// CHECK: cf.br ^[[dom]](%[[v3]], %[[v2]] : i1, i1) %c0 = arith.constant false %c1 = arith.constant true scf.yield %c1, %c0 : i1, i1 } // CHECK: ^[[dom]](%[[arg1:.*]]: i1, %[[arg2:.*]]: i1): -// CHECK: br ^[[cont:.*]] +// CHECK: cf.br ^[[cont:.*]] // CHECK: ^[[cont]]: // CHECK: return %[[arg1]], %[[arg2]] return %0#0, %0#1 : i1, i1 @@ -177,49 +177,49 @@ // CHECK-LABEL: func @nested_if_yield func @nested_if_yield(%arg0: i1) -> (index) { -// CHECK: cond_br %{{.*}}, ^[[first_then:.*]], ^[[first_else:.*]] +// CHECK: cf.cond_br %{{.*}}, ^[[first_then:.*]], ^[[first_else:.*]] %0 = scf.if %arg0 -> i1 { // CHECK: ^[[first_then]]: %1 = arith.constant true -// CHECK: br ^[[first_dom:.*]]({{.*}}) +// CHECK: cf.br ^[[first_dom:.*]]({{.*}}) scf.yield %1 : i1 } else { // CHECK: ^[[first_else]]: %2 = arith.constant false -// CHECK: br ^[[first_dom]]({{.*}}) +// CHECK: cf.br ^[[first_dom]]({{.*}}) scf.yield %2 : i1 } // CHECK: ^[[first_dom]](%[[arg1:.*]]: i1): -// CHECK: br ^[[first_cont:.*]] +// CHECK: cf.br ^[[first_cont:.*]] // CHECK: ^[[first_cont]]: -// CHECK: cond_br %[[arg1]], ^[[second_outer_then:.*]], ^[[second_outer_else:.*]] +// CHECK: cf.cond_br %[[arg1]], ^[[second_outer_then:.*]], ^[[second_outer_else:.*]] %1 = scf.if %0 -> index { // CHECK: ^[[second_outer_then]]: -// CHECK: cond_br %arg0, ^[[second_inner_then:.*]], ^[[second_inner_else:.*]] +// CHECK: cf.cond_br %arg0, ^[[second_inner_then:.*]], ^[[second_inner_else:.*]] %3 = scf.if %arg0 -> index { // CHECK: ^[[second_inner_then]]: %4 = arith.constant 40 : index -// CHECK: 
br ^[[second_inner_dom:.*]]({{.*}}) +// CHECK: cf.br ^[[second_inner_dom:.*]]({{.*}}) scf.yield %4 : index } else { // CHECK: ^[[second_inner_else]]: %5 = arith.constant 41 : index -// CHECK: br ^[[second_inner_dom]]({{.*}}) +// CHECK: cf.br ^[[second_inner_dom]]({{.*}}) scf.yield %5 : index } // CHECK: ^[[second_inner_dom]](%[[arg2:.*]]: index): -// CHECK: br ^[[second_inner_cont:.*]] +// CHECK: cf.br ^[[second_inner_cont:.*]] // CHECK: ^[[second_inner_cont]]: -// CHECK: br ^[[second_outer_dom:.*]]({{.*}}) +// CHECK: cf.br ^[[second_outer_dom:.*]]({{.*}}) scf.yield %3 : index } else { // CHECK: ^[[second_outer_else]]: %6 = arith.constant 42 : index -// CHECK: br ^[[second_outer_dom]]({{.*}} +// CHECK: cf.br ^[[second_outer_dom]]({{.*}} scf.yield %6 : index } // CHECK: ^[[second_outer_dom]](%[[arg3:.*]]: index): -// CHECK: br ^[[second_outer_cont:.*]] +// CHECK: cf.br ^[[second_outer_cont:.*]] // CHECK: ^[[second_outer_cont]]: // CHECK: return %[[arg3]] : index return %1 : index @@ -228,22 +228,22 @@ // CHECK-LABEL: func @parallel_loop( // CHECK-SAME: [[VAL_0:%.*]]: index, [[VAL_1:%.*]]: index, [[VAL_2:%.*]]: index, [[VAL_3:%.*]]: index, [[VAL_4:%.*]]: index) { // CHECK: [[VAL_5:%.*]] = arith.constant 1 : index -// CHECK: br ^bb1([[VAL_0]] : index) +// CHECK: cf.br ^bb1([[VAL_0]] : index) // CHECK: ^bb1([[VAL_6:%.*]]: index): // CHECK: [[VAL_7:%.*]] = arith.cmpi slt, [[VAL_6]], [[VAL_2]] : index -// CHECK: cond_br [[VAL_7]], ^bb2, ^bb6 +// CHECK: cf.cond_br [[VAL_7]], ^bb2, ^bb6 // CHECK: ^bb2: -// CHECK: br ^bb3([[VAL_1]] : index) +// CHECK: cf.br ^bb3([[VAL_1]] : index) // CHECK: ^bb3([[VAL_8:%.*]]: index): // CHECK: [[VAL_9:%.*]] = arith.cmpi slt, [[VAL_8]], [[VAL_3]] : index -// CHECK: cond_br [[VAL_9]], ^bb4, ^bb5 +// CHECK: cf.cond_br [[VAL_9]], ^bb4, ^bb5 // CHECK: ^bb4: // CHECK: [[VAL_10:%.*]] = arith.constant 1 : index // CHECK: [[VAL_11:%.*]] = arith.addi [[VAL_8]], [[VAL_5]] : index -// CHECK: br ^bb3([[VAL_11]] : index) +// CHECK: cf.br ^bb3([[VAL_11]] 
: index) // CHECK: ^bb5: // CHECK: [[VAL_12:%.*]] = arith.addi [[VAL_6]], [[VAL_4]] : index -// CHECK: br ^bb1([[VAL_12]] : index) +// CHECK: cf.br ^bb1([[VAL_12]] : index) // CHECK: ^bb6: // CHECK: return // CHECK: } @@ -262,16 +262,16 @@ // CHECK-SAME: (%[[LB:.*]]: index, %[[UB:.*]]: index, %[[STEP:.*]]: index) // CHECK: %[[INIT0:.*]] = arith.constant 0 // CHECK: %[[INIT1:.*]] = arith.constant 1 -// CHECK: br ^[[COND:.*]](%[[LB]], %[[INIT0]], %[[INIT1]] : index, f32, f32) +// CHECK: cf.br ^[[COND:.*]](%[[LB]], %[[INIT0]], %[[INIT1]] : index, f32, f32) // // CHECK: ^[[COND]](%[[ITER:.*]]: index, %[[ITER_ARG0:.*]]: f32, %[[ITER_ARG1:.*]]: f32): // CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[ITER]], %[[UB]] : index -// CHECK: cond_br %[[CMP]], ^[[BODY:.*]], ^[[CONTINUE:.*]] +// CHECK: cf.cond_br %[[CMP]], ^[[BODY:.*]], ^[[CONTINUE:.*]] // // CHECK: ^[[BODY]]: // CHECK: %[[SUM:.*]] = arith.addf %[[ITER_ARG0]], %[[ITER_ARG1]] : f32 // CHECK: %[[STEPPED:.*]] = arith.addi %[[ITER]], %[[STEP]] : index -// CHECK: br ^[[COND]](%[[STEPPED]], %[[SUM]], %[[SUM]] : index, f32, f32) +// CHECK: cf.br ^[[COND]](%[[STEPPED]], %[[SUM]], %[[SUM]] : index, f32, f32) // // CHECK: ^[[CONTINUE]]: // CHECK: return %[[ITER_ARG0]], %[[ITER_ARG1]] : f32, f32 @@ -288,18 +288,18 @@ // CHECK-LABEL: @nested_for_yield // CHECK-SAME: (%[[LB:.*]]: index, %[[UB:.*]]: index, %[[STEP:.*]]: index) // CHECK: %[[INIT:.*]] = arith.constant -// CHECK: br ^[[COND_OUT:.*]](%[[LB]], %[[INIT]] : index, f32) +// CHECK: cf.br ^[[COND_OUT:.*]](%[[LB]], %[[INIT]] : index, f32) // CHECK: ^[[COND_OUT]](%[[ITER_OUT:.*]]: index, %[[ARG_OUT:.*]]: f32): -// CHECK: cond_br %{{.*}}, ^[[BODY_OUT:.*]], ^[[CONT_OUT:.*]] +// CHECK: cf.cond_br %{{.*}}, ^[[BODY_OUT:.*]], ^[[CONT_OUT:.*]] // CHECK: ^[[BODY_OUT]]: -// CHECK: br ^[[COND_IN:.*]](%[[LB]], %[[ARG_OUT]] : index, f32) +// CHECK: cf.br ^[[COND_IN:.*]](%[[LB]], %[[ARG_OUT]] : index, f32) // CHECK: ^[[COND_IN]](%[[ITER_IN:.*]]: index, %[[ARG_IN:.*]]: f32): -// CHECK: cond_br 
%{{.*}}, ^[[BODY_IN:.*]], ^[[CONT_IN:.*]] +// CHECK: cf.cond_br %{{.*}}, ^[[BODY_IN:.*]], ^[[CONT_IN:.*]] // CHECK: ^[[BODY_IN]] // CHECK: %[[RES:.*]] = arith.addf -// CHECK: br ^[[COND_IN]](%{{.*}}, %[[RES]] : index, f32) +// CHECK: cf.br ^[[COND_IN]](%{{.*}}, %[[RES]] : index, f32) // CHECK: ^[[CONT_IN]]: -// CHECK: br ^[[COND_OUT]](%{{.*}}, %[[ARG_IN]] : index, f32) +// CHECK: cf.br ^[[COND_OUT]](%{{.*}}, %[[ARG_IN]] : index, f32) // CHECK: ^[[CONT_OUT]]: // CHECK: return %[[ARG_OUT]] : f32 func @nested_for_yield(%arg0 : index, %arg1 : index, %arg2 : index) -> f32 { @@ -325,13 +325,13 @@ // passed across as a block argument. // Branch to the condition block passing in the initial reduction value. - // CHECK: br ^[[COND:.*]](%[[LB]], %[[INIT]] + // CHECK: cf.br ^[[COND:.*]](%[[LB]], %[[INIT]] // Condition branch takes as arguments the current value of the iteration // variable and the current partially reduced value. // CHECK: ^[[COND]](%[[ITER:.*]]: index, %[[ITER_ARG:.*]]: f32 // CHECK: %[[COMP:.*]] = arith.cmpi slt, %[[ITER]], %[[UB]] - // CHECK: cond_br %[[COMP]], ^[[BODY:.*]], ^[[CONTINUE:.*]] + // CHECK: cf.cond_br %[[COMP]], ^[[BODY:.*]], ^[[CONTINUE:.*]] // Bodies of scf.reduce operations are folded into the main loop body. The // result of this partial reduction is passed as argument to the condition @@ -340,7 +340,7 @@ // CHECK: %[[CST:.*]] = arith.constant 4.2 // CHECK: %[[PROD:.*]] = arith.mulf %[[ITER_ARG]], %[[CST]] // CHECK: %[[INCR:.*]] = arith.addi %[[ITER]], %[[STEP]] - // CHECK: br ^[[COND]](%[[INCR]], %[[PROD]] + // CHECK: cf.br ^[[COND]](%[[INCR]], %[[PROD]] // The continuation block has access to the (last value of) reduction. // CHECK: ^[[CONTINUE]]: @@ -363,19 +363,19 @@ // Multiple reduction blocks should be folded in the same body, and the // reduction value must be forwarded through block structures. 
// CHECK: %[[INIT2:.*]] = arith.constant 42 - // CHECK: br ^[[COND_OUT:.*]](%{{.*}}, %[[INIT1]], %[[INIT2]] + // CHECK: cf.br ^[[COND_OUT:.*]](%{{.*}}, %[[INIT1]], %[[INIT2]] // CHECK: ^[[COND_OUT]](%{{.*}}: index, %[[ITER_ARG1_OUT:.*]]: f32, %[[ITER_ARG2_OUT:.*]]: i64 - // CHECK: cond_br %{{.*}}, ^[[BODY_OUT:.*]], ^[[CONT_OUT:.*]] + // CHECK: cf.cond_br %{{.*}}, ^[[BODY_OUT:.*]], ^[[CONT_OUT:.*]] // CHECK: ^[[BODY_OUT]]: - // CHECK: br ^[[COND_IN:.*]](%{{.*}}, %[[ITER_ARG1_OUT]], %[[ITER_ARG2_OUT]] + // CHECK: cf.br ^[[COND_IN:.*]](%{{.*}}, %[[ITER_ARG1_OUT]], %[[ITER_ARG2_OUT]] // CHECK: ^[[COND_IN]](%{{.*}}: index, %[[ITER_ARG1_IN:.*]]: f32, %[[ITER_ARG2_IN:.*]]: i64 - // CHECK: cond_br %{{.*}}, ^[[BODY_IN:.*]], ^[[CONT_IN:.*]] + // CHECK: cf.cond_br %{{.*}}, ^[[BODY_IN:.*]], ^[[CONT_IN:.*]] // CHECK: ^[[BODY_IN]]: // CHECK: %[[REDUCE1:.*]] = arith.addf %[[ITER_ARG1_IN]], %{{.*}} // CHECK: %[[REDUCE2:.*]] = arith.ori %[[ITER_ARG2_IN]], %{{.*}} - // CHECK: br ^[[COND_IN]](%{{.*}}, %[[REDUCE1]], %[[REDUCE2]] + // CHECK: cf.br ^[[COND_IN]](%{{.*}}, %[[REDUCE1]], %[[REDUCE2]] // CHECK: ^[[CONT_IN]]: - // CHECK: br ^[[COND_OUT]](%{{.*}}, %[[ITER_ARG1_IN]], %[[ITER_ARG2_IN]] + // CHECK: cf.br ^[[COND_OUT]](%{{.*}}, %[[ITER_ARG1_IN]], %[[ITER_ARG2_IN]] // CHECK: ^[[CONT_OUT]]: // CHECK: return %[[ITER_ARG1_OUT]], %[[ITER_ARG2_OUT]] %step = arith.constant 1 : index @@ -416,17 +416,17 @@ // CHECK-LABEL: @minimal_while func @minimal_while() { // CHECK: %[[COND:.*]] = "test.make_condition"() : () -> i1 - // CHECK: br ^[[BEFORE:.*]] + // CHECK: cf.br ^[[BEFORE:.*]] %0 = "test.make_condition"() : () -> i1 scf.while : () -> () { // CHECK: ^[[BEFORE]]: - // CHECK: cond_br %[[COND]], ^[[AFTER:.*]], ^[[CONT:.*]] + // CHECK: cf.cond_br %[[COND]], ^[[AFTER:.*]], ^[[CONT:.*]] scf.condition(%0) } do { // CHECK: ^[[AFTER]]: // CHECK: "test.some_payload"() : () -> () "test.some_payload"() : () -> () - // CHECK: br ^[[BEFORE]] + // CHECK: cf.br ^[[BEFORE]] scf.yield } // CHECK: 
^[[CONT]]: @@ -436,16 +436,16 @@ // CHECK-LABEL: @do_while func @do_while(%arg0: f32) { - // CHECK: br ^[[BEFORE:.*]]({{.*}}: f32) + // CHECK: cf.br ^[[BEFORE:.*]]({{.*}}: f32) scf.while (%arg1 = %arg0) : (f32) -> (f32) { // CHECK: ^[[BEFORE]](%[[VAL:.*]]: f32): // CHECK: %[[COND:.*]] = "test.make_condition"() : () -> i1 %0 = "test.make_condition"() : () -> i1 - // CHECK: cond_br %[[COND]], ^[[BEFORE]](%[[VAL]] : f32), ^[[CONT:.*]] + // CHECK: cf.cond_br %[[COND]], ^[[BEFORE]](%[[VAL]] : f32), ^[[CONT:.*]] scf.condition(%0) %arg1 : f32 } do { ^bb0(%arg2: f32): - // CHECK-NOT: br ^[[BEFORE]] + // CHECK-NOT: cf.br ^[[BEFORE]] scf.yield %arg2 : f32 } // CHECK: ^[[CONT]]: @@ -460,21 +460,21 @@ %0 = "test.make_condition"() : () -> i1 %c0_i32 = arith.constant 0 : i32 %cst = arith.constant 0.000000e+00 : f32 - // CHECK: br ^[[BEFORE:.*]](%[[ARG0]], %[[ARG1]] : i32, f32) + // CHECK: cf.br ^[[BEFORE:.*]](%[[ARG0]], %[[ARG1]] : i32, f32) %1:2 = scf.while (%arg2 = %arg0, %arg3 = %arg1) : (i32, f32) -> (i64, f64) { // CHECK: ^bb1(%[[ARG2:.*]]: i32, %[[ARG3:.]]: f32): // CHECK: %[[VAL1:.*]] = arith.extui %[[ARG0]] : i32 to i64 %2 = arith.extui %arg0 : i32 to i64 // CHECK: %[[VAL2:.*]] = arith.extf %[[ARG3]] : f32 to f64 %3 = arith.extf %arg3 : f32 to f64 - // CHECK: cond_br %[[COND]], + // CHECK: cf.cond_br %[[COND]], // CHECK: ^[[AFTER:.*]](%[[VAL1]], %[[VAL2]] : i64, f64), // CHECK: ^[[CONT:.*]] scf.condition(%0) %2, %3 : i64, f64 } do { // CHECK: ^[[AFTER]](%[[ARG4:.*]]: i64, %[[ARG5:.*]]: f64): ^bb0(%arg2: i64, %arg3: f64): - // CHECK: br ^[[BEFORE]](%{{.*}}, %{{.*}} : i32, f32) + // CHECK: cf.br ^[[BEFORE]](%{{.*}}, %{{.*}} : i32, f32) scf.yield %c0_i32, %cst : i32, f32 } // CHECK: ^bb3: @@ -484,17 +484,17 @@ // CHECK-LABEL: @nested_while_ops func @nested_while_ops(%arg0: f32) -> i64 { - // CHECK: br ^[[OUTER_BEFORE:.*]](%{{.*}} : f32) + // CHECK: cf.br ^[[OUTER_BEFORE:.*]](%{{.*}} : f32) %0 = scf.while(%outer = %arg0) : (f32) -> i64 { // CHECK: ^[[OUTER_BEFORE]](%{{.*}}: 
f32): // CHECK: %[[OUTER_COND:.*]] = "test.outer_before_pre"() : () -> i1 %cond = "test.outer_before_pre"() : () -> i1 - // CHECK: br ^[[INNER_BEFORE_BEFORE:.*]](%{{.*}} : f32) + // CHECK: cf.br ^[[INNER_BEFORE_BEFORE:.*]](%{{.*}} : f32) %1 = scf.while(%inner = %outer) : (f32) -> i64 { // CHECK: ^[[INNER_BEFORE_BEFORE]](%{{.*}}: f32): // CHECK: %[[INNER1:.*]]:2 = "test.inner_before"(%{{.*}}) : (f32) -> (i1, i64) %2:2 = "test.inner_before"(%inner) : (f32) -> (i1, i64) - // CHECK: cond_br %[[INNER1]]#0, + // CHECK: cf.cond_br %[[INNER1]]#0, // CHECK: ^[[INNER_BEFORE_AFTER:.*]](%[[INNER1]]#1 : i64), // CHECK: ^[[OUTER_BEFORE_LAST:.*]] scf.condition(%2#0) %2#1 : i64 @@ -503,13 +503,13 @@ ^bb0(%arg1: i64): // CHECK: %[[INNER2:.*]] = "test.inner_after"(%{{.*}}) : (i64) -> f32 %3 = "test.inner_after"(%arg1) : (i64) -> f32 - // CHECK: br ^[[INNER_BEFORE_BEFORE]](%[[INNER2]] : f32) + // CHECK: cf.br ^[[INNER_BEFORE_BEFORE]](%[[INNER2]] : f32) scf.yield %3 : f32 } // CHECK: ^[[OUTER_BEFORE_LAST]]: // CHECK: "test.outer_before_post"() : () -> () "test.outer_before_post"() : () -> () - // CHECK: cond_br %[[OUTER_COND]], + // CHECK: cf.cond_br %[[OUTER_COND]], // CHECK: ^[[OUTER_AFTER:.*]](%[[INNER1]]#1 : i64), // CHECK: ^[[CONTINUATION:.*]] scf.condition(%cond) %1 : i64 @@ -518,12 +518,12 @@ ^bb2(%arg2: i64): // CHECK: "test.outer_after_pre"(%{{.*}}) : (i64) -> () "test.outer_after_pre"(%arg2) : (i64) -> () - // CHECK: br ^[[INNER_AFTER_BEFORE:.*]](%{{.*}} : i64) + // CHECK: cf.br ^[[INNER_AFTER_BEFORE:.*]](%{{.*}} : i64) %4 = scf.while(%inner = %arg2) : (i64) -> f32 { // CHECK: ^[[INNER_AFTER_BEFORE]](%{{.*}}: i64): // CHECK: %[[INNER3:.*]]:2 = "test.inner2_before"(%{{.*}}) : (i64) -> (i1, f32) %5:2 = "test.inner2_before"(%inner) : (i64) -> (i1, f32) - // CHECK: cond_br %[[INNER3]]#0, + // CHECK: cf.cond_br %[[INNER3]]#0, // CHECK: ^[[INNER_AFTER_AFTER:.*]](%[[INNER3]]#1 : f32), // CHECK: ^[[OUTER_AFTER_LAST:.*]] scf.condition(%5#0) %5#1 : f32 @@ -532,13 +532,13 @@ 
^bb3(%arg3: f32): // CHECK: %{{.*}} = "test.inner2_after"(%{{.*}}) : (f32) -> i64 %6 = "test.inner2_after"(%arg3) : (f32) -> i64 - // CHECK: br ^[[INNER_AFTER_BEFORE]](%{{.*}} : i64) + // CHECK: cf.br ^[[INNER_AFTER_BEFORE]](%{{.*}} : i64) scf.yield %6 : i64 } // CHECK: ^[[OUTER_AFTER_LAST]]: // CHECK: "test.outer_after_post"() : () -> () "test.outer_after_post"() : () -> () - // CHECK: br ^[[OUTER_BEFORE]](%[[INNER3]]#1 : f32) + // CHECK: cf.br ^[[OUTER_BEFORE]](%[[INNER3]]#1 : f32) scf.yield %4 : f32 } // CHECK: ^[[CONTINUATION]]: @@ -549,27 +549,27 @@ // CHECK-LABEL: @ifs_in_parallel // CHECK: (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: i1, %[[ARG4:.*]]: i1) func @ifs_in_parallel(%arg1: index, %arg2: index, %arg3: index, %arg4: i1, %arg5: i1) { - // CHECK: br ^[[LOOP_LATCH:.*]](%[[ARG0]] : index) + // CHECK: cf.br ^[[LOOP_LATCH:.*]](%[[ARG0]] : index) // CHECK: ^[[LOOP_LATCH]](%[[LOOP_IV:.*]]: index): // CHECK: %[[LOOP_COND:.*]] = arith.cmpi slt, %[[LOOP_IV]], %[[ARG1]] : index - // CHECK: cond_br %[[LOOP_COND]], ^[[LOOP_BODY:.*]], ^[[LOOP_CONT:.*]] + // CHECK: cf.cond_br %[[LOOP_COND]], ^[[LOOP_BODY:.*]], ^[[LOOP_CONT:.*]] // CHECK: ^[[LOOP_BODY]]: - // CHECK: cond_br %[[ARG3]], ^[[IF1_THEN:.*]], ^[[IF1_CONT:.*]] + // CHECK: cf.cond_br %[[ARG3]], ^[[IF1_THEN:.*]], ^[[IF1_CONT:.*]] // CHECK: ^[[IF1_THEN]]: - // CHECK: cond_br %[[ARG4]], ^[[IF2_THEN:.*]], ^[[IF2_ELSE:.*]] + // CHECK: cf.cond_br %[[ARG4]], ^[[IF2_THEN:.*]], ^[[IF2_ELSE:.*]] // CHECK: ^[[IF2_THEN]]: // CHECK: %{{.*}} = "test.if2"() : () -> index - // CHECK: br ^[[IF2_MERGE:.*]](%{{.*}} : index) + // CHECK: cf.br ^[[IF2_MERGE:.*]](%{{.*}} : index) // CHECK: ^[[IF2_ELSE]]: // CHECK: %{{.*}} = "test.else2"() : () -> index - // CHECK: br ^[[IF2_MERGE]](%{{.*}} : index) + // CHECK: cf.br ^[[IF2_MERGE]](%{{.*}} : index) // CHECK: ^[[IF2_MERGE]](%{{.*}}: index): - // CHECK: br ^[[IF2_CONT:.*]] + // CHECK: cf.br ^[[IF2_CONT:.*]] // CHECK: ^[[IF2_CONT]]: - // CHECK: br 
^[[IF1_CONT]] + // CHECK: cf.br ^[[IF1_CONT]] // CHECK: ^[[IF1_CONT]]: // CHECK: %{{.*}} = arith.addi %[[LOOP_IV]], %[[ARG2]] : index - // CHECK: br ^[[LOOP_LATCH]](%{{.*}} : index) + // CHECK: cf.br ^[[LOOP_LATCH]](%{{.*}} : index) scf.parallel (%i) = (%arg1) to (%arg2) step (%arg3) { scf.if %arg4 { %0 = scf.if %arg5 -> (index) { @@ -593,7 +593,7 @@ "test.foo"() : () -> () %v = scf.execute_region -> i64 { %c = "test.cmp"() : () -> i1 - cond_br %c, ^bb2, ^bb3 + cf.cond_br %c, ^bb2, ^bb3 ^bb2: %x = "test.val1"() : () -> i64 scf.yield %x : i64 @@ -607,16 +607,16 @@ // CHECK-NOT: execute_region // CHECK: "test.foo" -// CHECK: br ^[[rentry:.+]] +// CHECK: cf.br ^[[rentry:.+]] // CHECK: ^[[rentry]] // CHECK: %[[cmp:.+]] = "test.cmp" -// CHECK: cond_br %[[cmp]], ^[[bb1:.+]], ^[[bb2:.+]] +// CHECK: cf.cond_br %[[cmp]], ^[[bb1:.+]], ^[[bb2:.+]] // CHECK: ^[[bb1]]: // CHECK: %[[x:.+]] = "test.val1" -// CHECK: br ^[[bb3:.+]](%[[x]] : i64) +// CHECK: cf.br ^[[bb3:.+]](%[[x]] : i64) // CHECK: ^[[bb2]]: // CHECK: %[[y:.+]] = "test.val2" -// CHECK: br ^[[bb3]](%[[y:.+]] : i64) +// CHECK: cf.br ^[[bb3]](%[[y:.+]] : i64) // CHECK: ^[[bb3]](%[[z:.+]]: i64): // CHECK: "test.bar"(%[[z]]) // CHECK: return diff --git a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir --- a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir +++ b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir @@ -6,7 +6,7 @@ // CHECK-SAME: %[[RHS:.*]]: tensor) -> !shape.witness { // CHECK: %[[RET:.*]] = shape.const_witness true // CHECK: %[[BROADCAST_IS_VALID:.*]] = shape.is_broadcastable %[[LHS]], %[[RHS]] -// CHECK: assert %[[BROADCAST_IS_VALID]], "required broadcastable shapes" +// CHECK: cf.assert %[[BROADCAST_IS_VALID]], "required broadcastable shapes" // CHECK: return %[[RET]] : !shape.witness // CHECK: } func @cstr_broadcastable(%arg0: tensor, %arg1: tensor) -> !shape.witness { @@ -19,7 
+19,7 @@ // CHECK-SAME: %[[RHS:.*]]: tensor) -> !shape.witness { // CHECK: %[[RET:.*]] = shape.const_witness true // CHECK: %[[EQUAL_IS_VALID:.*]] = shape.shape_eq %[[LHS]], %[[RHS]] -// CHECK: assert %[[EQUAL_IS_VALID]], "required equal shapes" +// CHECK: cf.assert %[[EQUAL_IS_VALID]], "required equal shapes" // CHECK: return %[[RET]] : !shape.witness // CHECK: } func @cstr_eq(%arg0: tensor, %arg1: tensor) -> !shape.witness { @@ -30,7 +30,7 @@ // CHECK-LABEL: func @cstr_require func @cstr_require(%arg0: i1) -> !shape.witness { // CHECK: %[[RET:.*]] = shape.const_witness true - // CHECK: assert %arg0, "msg" + // CHECK: cf.assert %arg0, "msg" // CHECK: return %[[RET]] %witness = shape.cstr_require %arg0, "msg" return %witness : !shape.witness diff --git a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir @@ -29,7 +29,7 @@ //CHECK-LABEL: llvm.func @pass_through(%arg0: !llvm.ptr>) -> !llvm.ptr> { func @pass_through(%arg0: () -> ()) -> (() -> ()) { // CHECK-NEXT: llvm.br ^bb1(%arg0 : !llvm.ptr>) - br ^bb1(%arg0 : () -> ()) + cf.br ^bb1(%arg0 : () -> ()) //CHECK-NEXT: ^bb1(%0: !llvm.ptr>): ^bb1(%bbarg: () -> ()): diff --git a/mlir/test/Conversion/StandardToLLVM/func-memref.mlir b/mlir/test/Conversion/StandardToLLVM/func-memref.mlir --- a/mlir/test/Conversion/StandardToLLVM/func-memref.mlir +++ b/mlir/test/Conversion/StandardToLLVM/func-memref.mlir @@ -109,17 +109,17 @@ // This test checks that in the BAREPTR case, the branch arguments only forward the descriptor. // This test was lowered from a simple scf.for that swaps 2 memref iter_args. 
// BAREPTR: llvm.br ^bb1(%{{.*}}, %{{.*}}, %{{.*}} : i64, !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>, !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>) - br ^bb1(%arg0, %base0, %base1 : index, memref<64xi32, 201>, memref<64xi32, 201>) + cf.br ^bb1(%arg0, %base0, %base1 : index, memref<64xi32, 201>, memref<64xi32, 201>) // BAREPTR-NEXT: ^bb1 // BAREPTR-NEXT: llvm.icmp // BAREPTR-NEXT: llvm.cond_br %{{.*}}, ^bb2, ^bb3 ^bb1(%0: index, %1: memref<64xi32, 201>, %2: memref<64xi32, 201>): // 2 preds: ^bb0, ^bb2 %3 = arith.cmpi slt, %0, %arg1 : index - cond_br %3, ^bb2, ^bb3 + cf.cond_br %3, ^bb2, ^bb3 ^bb2: // pred: ^bb1 %4 = arith.addi %0, %arg2 : index - br ^bb1(%4, %2, %1 : index, memref<64xi32, 201>, memref<64xi32, 201>) + cf.br ^bb1(%4, %2, %1 : index, memref<64xi32, 201>, memref<64xi32, 201>) ^bb3: // pred: ^bb1 return %1, %2 : memref<64xi32, 201>, memref<64xi32, 201> } diff --git a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir --- a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir +++ b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir @@ -18,7 +18,7 @@ ^bb0: // CHECK-NEXT: llvm.br ^bb1 // CHECK32-NEXT: llvm.br ^bb1 - br ^bb1 + cf.br ^bb1 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64 @@ -31,7 +31,7 @@ ^bb1: // pred: ^bb0 %c1 = arith.constant 1 : index %c42 = arith.constant 42 : index - br ^bb2(%c1 : index) + cf.br ^bb2(%c1 : index) // CHECK: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb3 // CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 @@ -41,7 +41,7 @@ // CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4 ^bb2(%0: index): // 2 preds: ^bb1, ^bb3 %1 = arith.cmpi slt, %0, %c42 : index - cond_br %1, ^bb3, ^bb4 + cf.cond_br %1, ^bb3, ^bb4 // CHECK: ^bb3: // pred: ^bb2 // CHECK-NEXT: llvm.call @body({{.*}}) : (i64) -> () @@ -57,7 +57,7 @@ call @body(%0) : (index) -> () %c1_0 = arith.constant 1 : index 
%2 = arith.addi %0, %c1_0 : index - br ^bb2(%2 : index) + cf.br ^bb2(%2 : index) // CHECK: ^bb4: // pred: ^bb2 // CHECK-NEXT: llvm.return @@ -111,7 +111,7 @@ func @func_args(i32, i32) -> i32 { ^bb0(%arg0: i32, %arg1: i32): %c0_i32 = arith.constant 0 : i32 - br ^bb1 + cf.br ^bb1 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64 @@ -124,7 +124,7 @@ ^bb1: // pred: ^bb0 %c0 = arith.constant 0 : index %c42 = arith.constant 42 : index - br ^bb2(%c0 : index) + cf.br ^bb2(%c0 : index) // CHECK-NEXT: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb3 // CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 @@ -134,7 +134,7 @@ // CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4 ^bb2(%0: index): // 2 preds: ^bb1, ^bb3 %1 = arith.cmpi slt, %0, %c42 : index - cond_br %1, ^bb3, ^bb4 + cf.cond_br %1, ^bb3, ^bb4 // CHECK-NEXT: ^bb3: // pred: ^bb2 // CHECK-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (i64) -> i64 @@ -159,7 +159,7 @@ %5 = call @other(%2, %arg1) : (index, i32) -> i32 %c1 = arith.constant 1 : index %6 = arith.addi %0, %c1 : index - br ^bb2(%6 : index) + cf.br ^bb2(%6 : index) // CHECK-NEXT: ^bb4: // pred: ^bb2 // CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64 @@ -191,7 +191,7 @@ // CHECK-NEXT: llvm.br ^bb1 func @imperfectly_nested_loops() { ^bb0: - br ^bb1 + cf.br ^bb1 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64 @@ -200,21 +200,21 @@ ^bb1: // pred: ^bb0 %c0 = arith.constant 0 : index %c42 = arith.constant 42 : index - br ^bb2(%c0 : index) + cf.br ^bb2(%c0 : index) // CHECK-NEXT: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb7 // CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb8 ^bb2(%0: index): // 2 preds: ^bb1, ^bb7 %1 = arith.cmpi slt, %0, %c42 : index - cond_br %1, ^bb3, ^bb8 + cf.cond_br %1, ^bb3, ^bb8 // CHECK-NEXT: ^bb3: // CHECK-NEXT: llvm.call @pre({{.*}}) : (i64) -> () // CHECK-NEXT: llvm.br ^bb4 ^bb3: // pred: 
^bb2 call @pre(%0) : (index) -> () - br ^bb4 + cf.br ^bb4 // CHECK-NEXT: ^bb4: // pred: ^bb3 // CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : i64 @@ -223,14 +223,14 @@ ^bb4: // pred: ^bb3 %c7 = arith.constant 7 : index %c56 = arith.constant 56 : index - br ^bb5(%c7 : index) + cf.br ^bb5(%c7 : index) // CHECK-NEXT: ^bb5({{.*}}: i64): // 2 preds: ^bb4, ^bb6 // CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb6, ^bb7 ^bb5(%2: index): // 2 preds: ^bb4, ^bb6 %3 = arith.cmpi slt, %2, %c56 : index - cond_br %3, ^bb6, ^bb7 + cf.cond_br %3, ^bb6, ^bb7 // CHECK-NEXT: ^bb6: // pred: ^bb5 // CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (i64, i64) -> () @@ -241,7 +241,7 @@ call @body2(%0, %2) : (index, index) -> () %c2 = arith.constant 2 : index %4 = arith.addi %2, %c2 : index - br ^bb5(%4 : index) + cf.br ^bb5(%4 : index) // CHECK-NEXT: ^bb7: // pred: ^bb5 // CHECK-NEXT: llvm.call @post({{.*}}) : (i64) -> () @@ -252,7 +252,7 @@ call @post(%0) : (index) -> () %c1 = arith.constant 1 : index %5 = arith.addi %0, %c1 : index - br ^bb2(%5 : index) + cf.br ^bb2(%5 : index) // CHECK-NEXT: ^bb8: // pred: ^bb2 // CHECK-NEXT: llvm.return @@ -316,49 +316,49 @@ // CHECK-NEXT: } func @more_imperfectly_nested_loops() { ^bb0: - br ^bb1 + cf.br ^bb1 ^bb1: // pred: ^bb0 %c0 = arith.constant 0 : index %c42 = arith.constant 42 : index - br ^bb2(%c0 : index) + cf.br ^bb2(%c0 : index) ^bb2(%0: index): // 2 preds: ^bb1, ^bb11 %1 = arith.cmpi slt, %0, %c42 : index - cond_br %1, ^bb3, ^bb12 + cf.cond_br %1, ^bb3, ^bb12 ^bb3: // pred: ^bb2 call @pre(%0) : (index) -> () - br ^bb4 + cf.br ^bb4 ^bb4: // pred: ^bb3 %c7 = arith.constant 7 : index %c56 = arith.constant 56 : index - br ^bb5(%c7 : index) + cf.br ^bb5(%c7 : index) ^bb5(%2: index): // 2 preds: ^bb4, ^bb6 %3 = arith.cmpi slt, %2, %c56 : index - cond_br %3, ^bb6, ^bb7 + cf.cond_br %3, ^bb6, ^bb7 ^bb6: // pred: ^bb5 call @body2(%0, %2) : (index, index) -> () %c2 = arith.constant 2 : index %4 
= arith.addi %2, %c2 : index - br ^bb5(%4 : index) + cf.br ^bb5(%4 : index) ^bb7: // pred: ^bb5 call @mid(%0) : (index) -> () - br ^bb8 + cf.br ^bb8 ^bb8: // pred: ^bb7 %c18 = arith.constant 18 : index %c37 = arith.constant 37 : index - br ^bb9(%c18 : index) + cf.br ^bb9(%c18 : index) ^bb9(%5: index): // 2 preds: ^bb8, ^bb10 %6 = arith.cmpi slt, %5, %c37 : index - cond_br %6, ^bb10, ^bb11 + cf.cond_br %6, ^bb10, ^bb11 ^bb10: // pred: ^bb9 call @body3(%0, %5) : (index, index) -> () %c3 = arith.constant 3 : index %7 = arith.addi %5, %c3 : index - br ^bb9(%7 : index) + cf.br ^bb9(%7 : index) ^bb11: // pred: ^bb9 call @post(%0) : (index) -> () %c1 = arith.constant 1 : index %8 = arith.addi %0, %c1 : index - br ^bb2(%8 : index) + cf.br ^bb2(%8 : index) ^bb12: // pred: ^bb2 return } @@ -432,7 +432,7 @@ // CHECK-NEXT: %[[CST:.*]] = llvm.mlir.constant(42 : i32) : i32 %0 = arith.constant 42 : i32 // CHECK-NEXT: llvm.br ^bb2 - br ^bb2 + cf.br ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: %[[ADD:.*]] = llvm.add %arg0, %[[CST]] : i32 @@ -444,7 +444,7 @@ // CHECK-NEXT: ^bb2: ^bb2: // CHECK-NEXT: llvm.br ^bb1 - br ^bb1 + cf.br ^bb1 } // ----- @@ -469,7 +469,7 @@ // ----- -// Lowers `assert` to a function call to `abort` if the assertion is violated. +// Lowers `cf.assert` to a function call to `abort` if the assertion is violated. 
// CHECK: llvm.func @abort() // CHECK-LABEL: @assert_test_function // CHECK-SAME: (%[[ARG:.*]]: i1) @@ -480,7 +480,7 @@ // CHECK: ^[[FAILURE_BLOCK]]: // CHECK: llvm.call @abort() : () -> () // CHECK: llvm.unreachable - assert %arg, "Computer says no" + cf.assert %arg, "Computer says no" return } @@ -514,8 +514,8 @@ // CHECK-LABEL: func @switchi8( func @switchi8(%arg0 : i8) -> i32 { -switch %arg0 : i8, [ - default: ^bb1, + cf.switch %arg0 : i8, [ + default: ^bb1, 42: ^bb1, 43: ^bb3 ] diff --git a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir --- a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir +++ b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir @@ -900,45 +900,3 @@ // CHECK: spv.ReturnValue %[[VAL]] return %extract : i32 } - -// ----- - -//===----------------------------------------------------------------------===// -// std.br, std.cond_br -//===----------------------------------------------------------------------===// - -module attributes { - spv.target_env = #spv.target_env<#spv.vce, {}> -} { - -// CHECK-LABEL: func @simple_loop -func @simple_loop(index, index, index) { -^bb0(%begin : index, %end : index, %step : index): -// CHECK-NEXT: spv.Branch ^bb1 - br ^bb1 - -// CHECK-NEXT: ^bb1: // pred: ^bb0 -// CHECK-NEXT: spv.Branch ^bb2({{.*}} : i32) -^bb1: // pred: ^bb0 - br ^bb2(%begin : index) - -// CHECK: ^bb2({{.*}}: i32): // 2 preds: ^bb1, ^bb3 -// CHECK-NEXT: {{.*}} = spv.SLessThan {{.*}}, {{.*}} : i32 -// CHECK-NEXT: spv.BranchConditional {{.*}}, ^bb3, ^bb4 -^bb2(%0: index): // 2 preds: ^bb1, ^bb3 - %1 = arith.cmpi slt, %0, %end : index - cond_br %1, ^bb3, ^bb4 - -// CHECK: ^bb3: // pred: ^bb2 -// CHECK-NEXT: {{.*}} = spv.IAdd {{.*}}, {{.*}} : i32 -// CHECK-NEXT: spv.Branch ^bb2({{.*}} : i32) -^bb3: // pred: ^bb2 - %2 = arith.addi %0, %step : index - br ^bb2(%2 : index) - -// CHECK: ^bb4: // pred: ^bb2 -^bb4: // pred: ^bb2 - return -} - -} diff --git 
a/mlir/test/Dialect/Affine/invalid.mlir b/mlir/test/Dialect/Affine/invalid.mlir --- a/mlir/test/Dialect/Affine/invalid.mlir +++ b/mlir/test/Dialect/Affine/invalid.mlir @@ -56,9 +56,9 @@ ^bb0(%arg: index): affine.load %M[%arg] : memref<10xi32> // expected-error@-1 {{index must be a dimension or symbol identifier}} - br ^bb1 + cf.br ^bb1 ^bb1: - br ^bb1 + cf.br ^bb1 }) : () -> () return } diff --git a/mlir/test/Dialect/Async/async-runtime-ref-counting.mlir b/mlir/test/Dialect/Async/async-runtime-ref-counting.mlir --- a/mlir/test/Dialect/Async/async-runtime-ref-counting.mlir +++ b/mlir/test/Dialect/Async/async-runtime-ref-counting.mlir @@ -54,13 +54,13 @@ // CHECK-LABEL: @token_arg_cond_br_await_with_fallthough // CHECK: %[[TOKEN:.*]]: !async.token func @token_arg_cond_br_await_with_fallthough(%arg0: !async.token, %arg1: i1) { - // CHECK: cond_br + // CHECK: cf.cond_br // CHECK-SAME: ^[[BB1:.*]], ^[[BB2:.*]] - cond_br %arg1, ^bb1, ^bb2 + cf.cond_br %arg1, ^bb1, ^bb2 ^bb1: // CHECK: ^[[BB1]]: - // CHECK: br ^[[BB2]] - br ^bb2 + // CHECK: cf.br ^[[BB2]] + cf.br ^bb2 ^bb2: // CHECK: ^[[BB2]]: // CHECK: async.runtime.await %[[TOKEN]] @@ -88,10 +88,10 @@ async.runtime.resume %hdl async.coro.suspend %saved, ^suspend, ^resume, ^cleanup ^resume: - br ^cleanup + cf.br ^cleanup ^cleanup: async.coro.free %id, %hdl - br ^suspend + cf.br ^suspend ^suspend: async.coro.end %hdl return %token : !async.token @@ -109,10 +109,10 @@ // CHECK-NEXT: async.runtime.drop_ref %[[TOKEN]] {count = 1 : i64} async.coro.suspend %saved, ^suspend, ^resume, ^cleanup ^resume: - br ^cleanup + cf.br ^cleanup ^cleanup: async.coro.free %id, %hdl - br ^suspend + cf.br ^suspend ^suspend: async.coro.end %hdl return %token : !async.token @@ -137,10 +137,10 @@ %0 = async.runtime.load %arg0 : !async.value // CHECK: arith.addf %[[LOADED]], %[[LOADED]] %1 = arith.addf %0, %0 : f32 - br ^cleanup + cf.br ^cleanup ^cleanup: async.coro.free %id, %hdl - br ^suspend + cf.br ^suspend ^suspend: async.coro.end %hdl return 
%token : !async.token @@ -167,12 +167,12 @@ // CHECK: ^[[RESUME_1:.*]]: // CHECK: async.runtime.set_available async.runtime.set_available %0 : !async.token - br ^cleanup + cf.br ^cleanup ^cleanup: // CHECK: ^[[CLEANUP:.*]]: // CHECK: async.coro.free async.coro.free %1, %2 - br ^suspend + cf.br ^suspend ^suspend: // CHECK: ^[[SUSPEND:.*]]: // CHECK: async.coro.end @@ -198,7 +198,7 @@ // CHECK-LABEL: @token_defined_in_the_loop func @token_defined_in_the_loop() { - br ^bb1 + cf.br ^bb1 ^bb1: // CHECK: ^[[BB1:.*]]: // CHECK: %[[TOKEN:.*]] = call @token() @@ -207,7 +207,7 @@ // CHECK: async.runtime.drop_ref %[[TOKEN]] {count = 1 : i64} async.runtime.await %token : !async.token %0 = call @cond(): () -> (i1) - cond_br %0, ^bb1, ^bb2 + cf.cond_br %0, ^bb1, ^bb2 ^bb2: // CHECK: ^[[BB2:.*]]: // CHECK: return @@ -218,18 +218,18 @@ func @divergent_liveness_one_token(%arg0 : i1) { // CHECK: %[[TOKEN:.*]] = call @token() %token = call @token() : () -> !async.token - // CHECK: cond_br %arg0, ^[[LIVE_IN:.*]], ^[[REF_COUNTING:.*]] - cond_br %arg0, ^bb1, ^bb2 + // CHECK: cf.cond_br %arg0, ^[[LIVE_IN:.*]], ^[[REF_COUNTING:.*]] + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: // CHECK: ^[[LIVE_IN]]: // CHECK: async.runtime.await %[[TOKEN]] // CHECK: async.runtime.drop_ref %[[TOKEN]] {count = 1 : i64} - // CHECK: br ^[[RETURN:.*]] + // CHECK: cf.br ^[[RETURN:.*]] async.runtime.await %token : !async.token - br ^bb2 + cf.br ^bb2 // CHECK: ^[[REF_COUNTING:.*]]: // CHECK: async.runtime.drop_ref %[[TOKEN]] {count = 1 : i64} - // CHECK: br ^[[RETURN:.*]] + // CHECK: cf.br ^[[RETURN:.*]] ^bb2: // CHECK: ^[[RETURN]]: // CHECK: return @@ -240,20 +240,20 @@ func @divergent_liveness_unique_predecessor(%arg0 : i1) { // CHECK: %[[TOKEN:.*]] = call @token() %token = call @token() : () -> !async.token - // CHECK: cond_br %arg0, ^[[LIVE_IN:.*]], ^[[NO_LIVE_IN:.*]] - cond_br %arg0, ^bb2, ^bb1 + // CHECK: cf.cond_br %arg0, ^[[LIVE_IN:.*]], ^[[NO_LIVE_IN:.*]] + cf.cond_br %arg0, ^bb2, ^bb1 ^bb1: // CHECK: 
^[[NO_LIVE_IN]]: // CHECK: async.runtime.drop_ref %[[TOKEN]] {count = 1 : i64} - // CHECK: br ^[[RETURN:.*]] - br ^bb3 + // CHECK: cf.br ^[[RETURN:.*]] + cf.br ^bb3 ^bb2: // CHECK: ^[[LIVE_IN]]: // CHECK: async.runtime.await %[[TOKEN]] // CHECK: async.runtime.drop_ref %[[TOKEN]] {count = 1 : i64} - // CHECK: br ^[[RETURN]] + // CHECK: cf.br ^[[RETURN]] async.runtime.await %token : !async.token - br ^bb3 + cf.br ^bb3 ^bb3: // CHECK: ^[[RETURN]]: // CHECK: return @@ -266,24 +266,24 @@ // CHECK: %[[TOKEN1:.*]] = call @token() %token0 = call @token() : () -> !async.token %token1 = call @token() : () -> !async.token - // CHECK: cond_br %arg0, ^[[AWAIT0:.*]], ^[[AWAIT1:.*]] - cond_br %arg0, ^await0, ^await1 + // CHECK: cf.cond_br %arg0, ^[[AWAIT0:.*]], ^[[AWAIT1:.*]] + cf.cond_br %arg0, ^await0, ^await1 ^await0: // CHECK: ^[[AWAIT0]]: // CHECK: async.runtime.drop_ref %[[TOKEN1]] {count = 1 : i64} // CHECK: async.runtime.await %[[TOKEN0]] // CHECK: async.runtime.drop_ref %[[TOKEN0]] {count = 1 : i64} - // CHECK: br ^[[RETURN:.*]] + // CHECK: cf.br ^[[RETURN:.*]] async.runtime.await %token0 : !async.token - br ^ret + cf.br ^ret ^await1: // CHECK: ^[[AWAIT1]]: // CHECK: async.runtime.drop_ref %[[TOKEN0]] {count = 1 : i64} // CHECK: async.runtime.await %[[TOKEN1]] // CHECK: async.runtime.drop_ref %[[TOKEN1]] {count = 1 : i64} - // CHECK: br ^[[RETURN]] + // CHECK: cf.br ^[[RETURN]] async.runtime.await %token1 : !async.token - br ^ret + cf.br ^ret ^ret: // CHECK: ^[[RETURN]]: // CHECK: return diff --git a/mlir/test/Dialect/Async/async-to-async-runtime-eliminate-blocking.mlir b/mlir/test/Dialect/Async/async-to-async-runtime-eliminate-blocking.mlir --- a/mlir/test/Dialect/Async/async-to-async-runtime-eliminate-blocking.mlir +++ b/mlir/test/Dialect/Async/async-to-async-runtime-eliminate-blocking.mlir @@ -10,7 +10,7 @@ // CHECK: %[[RETURNED_STORAGE:.*]] = async.runtime.create : !async.value // CHECK: %[[ID:.*]] = async.coro.id // CHECK: %[[HDL:.*]] = async.coro.begin %[[ID]] -// 
CHECK: br ^[[ORIGINAL_ENTRY:.*]] +// CHECK: cf.br ^[[ORIGINAL_ENTRY:.*]] // CHECK ^[[ORIGINAL_ENTRY]]: // CHECK: %[[VAL:.*]] = arith.addf %[[ARG]], %[[ARG]] : f32 %0 = arith.addf %arg0, %arg0 : f32 @@ -29,7 +29,7 @@ // CHECK: ^[[RESUME]]: // CHECK: %[[IS_ERROR:.*]] = async.runtime.is_error %[[VAL_STORAGE]] : !async.value -// CHECK: cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] +// CHECK: cf.cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] // CHECK: ^[[BRANCH_OK]]: // CHECK: %[[LOADED:.*]] = async.runtime.load %[[VAL_STORAGE]] : @@ -37,19 +37,19 @@ // CHECK: async.runtime.store %[[RETURNED]], %[[RETURNED_STORAGE]] : // CHECK: async.runtime.set_available %[[RETURNED_STORAGE]] // CHECK: async.runtime.set_available %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] %3 = arith.mulf %arg0, %2 : f32 return %3: f32 // CHECK: ^[[BRANCH_ERROR]]: // CHECK: async.runtime.set_error %[[TOKEN]] // CHECK: async.runtime.set_error %[[RETURNED_STORAGE]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // CHECK: ^[[CLEANUP]]: // CHECK: async.coro.free %[[ID]], %[[HDL]] -// CHECK: br ^[[SUSPEND]] +// CHECK: cf.br ^[[SUSPEND]] // CHECK: ^[[SUSPEND]]: // CHECK: async.coro.end %[[HDL]] @@ -63,7 +63,7 @@ // CHECK: %[[RETURNED_STORAGE:.*]] = async.runtime.create : !async.value // CHECK: %[[ID:.*]] = async.coro.id // CHECK: %[[HDL:.*]] = async.coro.begin %[[ID]] -// CHECK: br ^[[ORIGINAL_ENTRY:.*]] +// CHECK: cf.br ^[[ORIGINAL_ENTRY:.*]] // CHECK ^[[ORIGINAL_ENTRY]]: // CHECK: %[[CONSTANT:.*]] = arith.constant @@ -77,28 +77,28 @@ // CHECK: ^[[RESUME]]: // CHECK: %[[IS_TOKEN_ERROR:.*]] = async.runtime.is_error %[[RETURNED_TO_CALLER]]#0 : !async.token -// CHECK: cond_br %[[IS_TOKEN_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_TOKEN_OK:.*]] +// CHECK: cf.cond_br %[[IS_TOKEN_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_TOKEN_OK:.*]] // CHECK: ^[[BRANCH_TOKEN_OK]]: // CHECK: %[[IS_VALUE_ERROR:.*]] = async.runtime.is_error %[[RETURNED_TO_CALLER]]#1 : 
!async.value -// CHECK: cond_br %[[IS_VALUE_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_VALUE_OK:.*]] +// CHECK: cf.cond_br %[[IS_VALUE_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_VALUE_OK:.*]] // CHECK: ^[[BRANCH_VALUE_OK]]: // CHECK: %[[LOADED:.*]] = async.runtime.load %[[RETURNED_TO_CALLER]]#1 : // CHECK: async.runtime.store %[[LOADED]], %[[RETURNED_STORAGE]] : // CHECK: async.runtime.set_available %[[RETURNED_STORAGE]] // CHECK: async.runtime.set_available %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] return %r: f32 // CHECK: ^[[BRANCH_ERROR]]: // CHECK: async.runtime.set_error %[[TOKEN]] // CHECK: async.runtime.set_error %[[RETURNED_STORAGE]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // CHECK: ^[[CLEANUP]]: // CHECK: async.coro.free %[[ID]], %[[HDL]] -// CHECK: br ^[[SUSPEND]] +// CHECK: cf.br ^[[SUSPEND]] // CHECK: ^[[SUSPEND]]: // CHECK: async.coro.end %[[HDL]] @@ -112,7 +112,7 @@ // CHECK: %[[RETURNED_STORAGE:.*]] = async.runtime.create : !async.value // CHECK: %[[ID:.*]] = async.coro.id // CHECK: %[[HDL:.*]] = async.coro.begin %[[ID]] -// CHECK: br ^[[ORIGINAL_ENTRY:.*]] +// CHECK: cf.br ^[[ORIGINAL_ENTRY:.*]] // CHECK ^[[ORIGINAL_ENTRY]]: // CHECK: %[[CONSTANT:.*]] = arith.constant @@ -126,11 +126,11 @@ // CHECK: ^[[RESUME_1]]: // CHECK: %[[IS_TOKEN_ERROR_1:.*]] = async.runtime.is_error %[[RETURNED_TO_CALLER_1]]#0 : !async.token -// CHECK: cond_br %[[IS_TOKEN_ERROR_1]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_TOKEN_OK_1:.*]] +// CHECK: cf.cond_br %[[IS_TOKEN_ERROR_1]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_TOKEN_OK_1:.*]] // CHECK: ^[[BRANCH_TOKEN_OK_1]]: // CHECK: %[[IS_VALUE_ERROR_1:.*]] = async.runtime.is_error %[[RETURNED_TO_CALLER_1]]#1 : !async.value -// CHECK: cond_br %[[IS_VALUE_ERROR_1]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_VALUE_OK_1:.*]] +// CHECK: cf.cond_br %[[IS_VALUE_ERROR_1]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_VALUE_OK_1:.*]] // CHECK: ^[[BRANCH_VALUE_OK_1]]: // CHECK: %[[LOADED_1:.*]] = async.runtime.load 
%[[RETURNED_TO_CALLER_1]]#1 : @@ -143,27 +143,27 @@ // CHECK: ^[[RESUME_2]]: // CHECK: %[[IS_TOKEN_ERROR_2:.*]] = async.runtime.is_error %[[RETURNED_TO_CALLER_2]]#0 : !async.token -// CHECK: cond_br %[[IS_TOKEN_ERROR_2]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_TOKEN_OK_2:.*]] +// CHECK: cf.cond_br %[[IS_TOKEN_ERROR_2]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_TOKEN_OK_2:.*]] // CHECK: ^[[BRANCH_TOKEN_OK_2]]: // CHECK: %[[IS_VALUE_ERROR_2:.*]] = async.runtime.is_error %[[RETURNED_TO_CALLER_2]]#1 : !async.value -// CHECK: cond_br %[[IS_VALUE_ERROR_2]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_VALUE_OK_2:.*]] +// CHECK: cf.cond_br %[[IS_VALUE_ERROR_2]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_VALUE_OK_2:.*]] // CHECK: ^[[BRANCH_VALUE_OK_2]]: // CHECK: %[[LOADED_2:.*]] = async.runtime.load %[[RETURNED_TO_CALLER_2]]#1 : // CHECK: async.runtime.store %[[LOADED_2]], %[[RETURNED_STORAGE]] : // CHECK: async.runtime.set_available %[[RETURNED_STORAGE]] // CHECK: async.runtime.set_available %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] return %s: f32 // CHECK: ^[[BRANCH_ERROR]]: // CHECK: async.runtime.set_error %[[TOKEN]] // CHECK: async.runtime.set_error %[[RETURNED_STORAGE]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // CHECK: ^[[CLEANUP]]: // CHECK: async.coro.free %[[ID]], %[[HDL]] -// CHECK: br ^[[SUSPEND]] +// CHECK: cf.br ^[[SUSPEND]] // CHECK: ^[[SUSPEND]]: // CHECK: async.coro.end %[[HDL]] @@ -184,7 +184,7 @@ async.await %arg : !async.token // CHECK: ^[[RESUME_1]]: // CHECK: %[[IS_ERROR:.*]] = async.runtime.is_error %[[ARG]] : !async.token -// CHECK: cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] +// CHECK: cf.cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] // CHECK: ^[[BRANCH_OK]]: // CHECK: %[[GIVEN:.*]] = async.runtime.create : !async.token @@ -200,16 +200,16 @@ // CHECK: ^[[RESUME_2]]: // CHECK: async.runtime.set_available %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // CHECK: ^[[BRANCH_ERROR]]: // CHECK: 
async.runtime.set_error %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] return // CHECK: ^[[CLEANUP]]: // CHECK: async.coro.free %[[ID]], %[[HDL]] -// CHECK: br ^[[SUSPEND]] +// CHECK: cf.br ^[[SUSPEND]] // CHECK: ^[[SUSPEND]]: // CHECK: async.coro.end %[[HDL]] @@ -230,7 +230,7 @@ async.await %arg : !async.token // CHECK: ^[[RESUME_1]]: // CHECK: %[[IS_ERROR:.*]] = async.runtime.is_error %[[ARG]] : !async.token -// CHECK: cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] +// CHECK: cf.cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] // CHECK: ^[[BRANCH_OK]]: // CHECK: %[[GIVEN:.*]] = async.runtime.create : !async.token @@ -246,16 +246,16 @@ // CHECK: ^[[RESUME_2]]: // CHECK: async.runtime.set_available %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // CHECK: ^[[BRANCH_ERROR]]: // CHECK: async.runtime.set_error %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] return // CHECK: ^[[CLEANUP]]: // CHECK: async.coro.free %[[ID]], %[[HDL]] -// CHECK: br ^[[SUSPEND]] +// CHECK: cf.br ^[[SUSPEND]] // CHECK: ^[[SUSPEND]]: // CHECK: async.coro.end %[[HDL]] @@ -276,7 +276,7 @@ async.await %arg : !async.token // CHECK: ^[[RESUME_1]]: // CHECK: %[[IS_ERROR:.*]] = async.runtime.is_error %[[ARG]] : !async.token -// CHECK: cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] +// CHECK: cf.cond_br %[[IS_ERROR]], ^[[BRANCH_ERROR:.*]], ^[[BRANCH_OK:.*]] // CHECK: ^[[BRANCH_OK]]: // CHECK: %[[GIVEN:.*]] = async.runtime.create : !async.token @@ -292,16 +292,16 @@ // CHECK: ^[[RESUME_2]]: // CHECK: async.runtime.set_available %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // CHECK: ^[[BRANCH_ERROR]]: // CHECK: async.runtime.set_error %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] return // CHECK: ^[[CLEANUP]]: // CHECK: async.coro.free %[[ID]], %[[HDL]] -// CHECK: br ^[[SUSPEND]] +// CHECK: cf.br ^[[SUSPEND]] // CHECK: ^[[SUSPEND]]: // CHECK: 
async.coro.end %[[HDL]] diff --git a/mlir/test/Dialect/Async/async-to-async-runtime.mlir b/mlir/test/Dialect/Async/async-to-async-runtime.mlir --- a/mlir/test/Dialect/Async/async-to-async-runtime.mlir +++ b/mlir/test/Dialect/Async/async-to-async-runtime.mlir @@ -63,7 +63,7 @@ // CHECK: %[[IS_ERROR:.*]] = async.runtime.is_error %[[TOKEN]] // CHECK: %[[TRUE:.*]] = arith.constant true // CHECK: %[[NOT_ERROR:.*]] = arith.xori %[[IS_ERROR]], %[[TRUE]] : i1 - // CHECK: assert %[[NOT_ERROR]] + // CHECK: cf.assert %[[NOT_ERROR]] // CHECK-NEXT: return async.await %token0 : !async.token return @@ -109,7 +109,7 @@ // Check the error of the awaited token after resumption. // CHECK: ^[[RESUME_1]]: // CHECK: %[[ERR:.*]] = async.runtime.is_error %[[INNER_TOKEN]] -// CHECK: cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] +// CHECK: cf.cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] // Set token available if the token is not in the error state. // CHECK: ^[[CONTINUATION:.*]]: @@ -169,7 +169,7 @@ // Check the error of the awaited token after resumption. // CHECK: ^[[RESUME_1]]: // CHECK: %[[ERR:.*]] = async.runtime.is_error %[[ARG0]] -// CHECK: cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] +// CHECK: cf.cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] // Emplace result token after second resumption and error checking. // CHECK: ^[[CONTINUATION:.*]]: @@ -225,7 +225,7 @@ // Check the error of the awaited token after resumption. // CHECK: ^[[RESUME_1]]: // CHECK: %[[ERR:.*]] = async.runtime.is_error %[[ARG]] -// CHECK: cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] +// CHECK: cf.cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] // Emplace result token after error checking. // CHECK: ^[[CONTINUATION:.*]]: @@ -319,7 +319,7 @@ // Check the error of the awaited token after resumption. 
// CHECK: ^[[RESUME_1]]: // CHECK: %[[ERR:.*]] = async.runtime.is_error %[[ARG]] -// CHECK: cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] +// CHECK: cf.cond_br %[[ERR]], ^[[SET_ERROR:.*]], ^[[CONTINUATION:.*]] // // Load from the async.value argument after error checking. // CHECK: ^[[CONTINUATION:.*]]: @@ -335,7 +335,7 @@ // CHECK-LABEL: @execute_assertion func @execute_assertion(%arg0: i1) { %token = async.execute { - assert %arg0, "error" + cf.assert %arg0, "error" async.yield } async.await %token : !async.token @@ -358,17 +358,17 @@ // Resume coroutine after suspension. // CHECK: ^[[RESUME]]: -// CHECK: cond_br %[[ARG0]], ^[[SET_AVAILABLE:.*]], ^[[SET_ERROR:.*]] +// CHECK: cf.cond_br %[[ARG0]], ^[[SET_AVAILABLE:.*]], ^[[SET_ERROR:.*]] // Set coroutine completion token to available state. // CHECK: ^[[SET_AVAILABLE]]: // CHECK: async.runtime.set_available %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // Set coroutine completion token to error state. // CHECK: ^[[SET_ERROR]]: // CHECK: async.runtime.set_error %[[TOKEN]] -// CHECK: br ^[[CLEANUP]] +// CHECK: cf.br ^[[CLEANUP]] // Delete coroutine. // CHECK: ^[[CLEANUP]]: @@ -409,7 +409,7 @@ // Check that structured control flow lowered to CFG. 
// CHECK-NOT: scf.if -// CHECK: cond_br %[[FLAG]] +// CHECK: cf.cond_br %[[FLAG]] // ----- // Constants captured by the async.execute region should be cloned into the diff --git a/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation.mlir b/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation.mlir @@ -17,26 +17,26 @@ // CHECK-LABEL: func @condBranch func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: %[[ALLOC0:.*]] = bufferization.clone -// CHECK-NEXT: br ^bb3(%[[ALLOC0]] +// CHECK-NEXT: cf.br ^bb3(%[[ALLOC0]] // CHECK: %[[ALLOC1:.*]] = memref.alloc // CHECK-NEXT: test.buffer_based // CHECK-NEXT: %[[ALLOC2:.*]] = bufferization.clone %[[ALLOC1]] // CHECK-NEXT: memref.dealloc %[[ALLOC1]] -// CHECK-NEXT: br ^bb3(%[[ALLOC2]] +// CHECK-NEXT: cf.br ^bb3(%[[ALLOC2]] // CHECK: test.copy // CHECK-NEXT: memref.dealloc // CHECK-NEXT: return @@ -62,27 +62,27 @@ %arg1: memref, %arg2: memref, %arg3: index) { - cond_br %arg0, ^bb1, ^bb2(%arg3: index) + cf.cond_br %arg0, ^bb1, ^bb2(%arg3: index) ^bb1: - br ^bb3(%arg1 : memref) + cf.br ^bb3(%arg1 : memref) ^bb2(%0: index): %1 = memref.alloc(%0) : memref test.buffer_based in(%arg1: memref) out(%1: memref) - br ^bb3(%1 : memref) + cf.br ^bb3(%1 : memref) ^bb3(%2: memref): test.copy(%2, %arg2) : (memref, memref) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: %[[ALLOC0:.*]] = 
bufferization.clone -// CHECK-NEXT: br ^bb3(%[[ALLOC0]] +// CHECK-NEXT: cf.br ^bb3(%[[ALLOC0]] // CHECK: ^bb2(%[[IDX:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC1:.*]] = memref.alloc(%[[IDX]]) // CHECK-NEXT: test.buffer_based // CHECK-NEXT: %[[ALLOC2:.*]] = bufferization.clone // CHECK-NEXT: memref.dealloc %[[ALLOC1]] -// CHECK-NEXT: br ^bb3 +// CHECK-NEXT: cf.br ^bb3 // CHECK-NEXT: ^bb3(%[[ALLOC3:.*]]:{{.*}}) // CHECK: test.copy(%[[ALLOC3]], // CHECK-NEXT: memref.dealloc %[[ALLOC3]] @@ -98,28 +98,28 @@ %arg1: memref<*xf32>, %arg2: memref<*xf32>, %arg3: index) { - cond_br %arg0, ^bb1, ^bb2(%arg3: index) + cf.cond_br %arg0, ^bb1, ^bb2(%arg3: index) ^bb1: - br ^bb3(%arg1 : memref<*xf32>) + cf.br ^bb3(%arg1 : memref<*xf32>) ^bb2(%0: index): %1 = memref.alloc(%0) : memref %2 = memref.cast %1 : memref to memref<*xf32> test.buffer_based in(%arg1: memref<*xf32>) out(%2: memref<*xf32>) - br ^bb3(%2 : memref<*xf32>) + cf.br ^bb3(%2 : memref<*xf32>) ^bb3(%3: memref<*xf32>): test.copy(%3, %arg2) : (memref<*xf32>, memref<*xf32>) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: %[[ALLOC0:.*]] = bufferization.clone -// CHECK-NEXT: br ^bb3(%[[ALLOC0]] +// CHECK-NEXT: cf.br ^bb3(%[[ALLOC0]] // CHECK: ^bb2(%[[IDX:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC1:.*]] = memref.alloc(%[[IDX]]) // CHECK: test.buffer_based // CHECK-NEXT: %[[ALLOC2:.*]] = bufferization.clone // CHECK-NEXT: memref.dealloc %[[ALLOC1]] -// CHECK-NEXT: br ^bb3 +// CHECK-NEXT: cf.br ^bb3 // CHECK-NEXT: ^bb3(%[[ALLOC3:.*]]:{{.*}}) // CHECK: test.copy(%[[ALLOC3]], // CHECK-NEXT: memref.dealloc %[[ALLOC3]] @@ -153,44 +153,44 @@ %arg1: memref, %arg2: memref, %arg3: index) { - cond_br %arg0, ^bb1, ^bb2(%arg3: index) + cf.cond_br %arg0, ^bb1, ^bb2(%arg3: index) ^bb1: - br ^bb6(%arg1 : memref) + cf.br ^bb6(%arg1 : memref) ^bb2(%0: index): %1 = memref.alloc(%0) : memref test.buffer_based in(%arg1: memref) out(%1: memref) - cond_br %arg0, ^bb3, ^bb4 + cf.cond_br %arg0, ^bb3, ^bb4 ^bb3: - br ^bb5(%1 : memref) + cf.br 
^bb5(%1 : memref) ^bb4: - br ^bb5(%1 : memref) + cf.br ^bb5(%1 : memref) ^bb5(%2: memref): - br ^bb6(%2 : memref) + cf.br ^bb6(%2 : memref) ^bb6(%3: memref): - br ^bb7(%3 : memref) + cf.br ^bb7(%3 : memref) ^bb7(%4: memref): test.copy(%4, %arg2) : (memref, memref) return } -// CHECK-NEXT: cond_br{{.*}} +// CHECK-NEXT: cf.cond_br{{.*}} // CHECK-NEXT: ^bb1 // CHECK-NEXT: %[[ALLOC0:.*]] = bufferization.clone -// CHECK-NEXT: br ^bb6(%[[ALLOC0]] +// CHECK-NEXT: cf.br ^bb6(%[[ALLOC0]] // CHECK: ^bb2(%[[IDX:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC1:.*]] = memref.alloc(%[[IDX]]) // CHECK-NEXT: test.buffer_based -// CHECK: cond_br +// CHECK: cf.cond_br // CHECK: ^bb3: -// CHECK-NEXT: br ^bb5(%[[ALLOC1]]{{.*}}) +// CHECK-NEXT: cf.br ^bb5(%[[ALLOC1]]{{.*}}) // CHECK: ^bb4: -// CHECK-NEXT: br ^bb5(%[[ALLOC1]]{{.*}}) +// CHECK-NEXT: cf.br ^bb5(%[[ALLOC1]]{{.*}}) // CHECK-NEXT: ^bb5(%[[ALLOC2:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC3:.*]] = bufferization.clone %[[ALLOC2]] // CHECK-NEXT: memref.dealloc %[[ALLOC1]] -// CHECK-NEXT: br ^bb6(%[[ALLOC3]]{{.*}}) +// CHECK-NEXT: cf.br ^bb6(%[[ALLOC3]]{{.*}}) // CHECK-NEXT: ^bb6(%[[ALLOC4:.*]]:{{.*}}) -// CHECK-NEXT: br ^bb7(%[[ALLOC4]]{{.*}}) +// CHECK-NEXT: cf.br ^bb7(%[[ALLOC4]]{{.*}}) // CHECK-NEXT: ^bb7(%[[ALLOC5:.*]]:{{.*}}) // CHECK: test.copy(%[[ALLOC5]], // CHECK-NEXT: memref.dealloc %[[ALLOC4]] @@ -225,18 +225,18 @@ // CHECK-LABEL: func @criticalEdge func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) + cf.cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) ^bb1: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb2(%0 : memref<2xf32>) + cf.br ^bb2(%0 : memref<2xf32>) ^bb2(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %[[ALLOC0:.*]] = bufferization.clone -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: %[[ALLOC1:.*]] = memref.alloc() // 
CHECK-NEXT: test.buffer_based // CHECK-NEXT: %[[ALLOC2:.*]] = bufferization.clone %[[ALLOC1]] @@ -260,9 +260,9 @@ func @invCriticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) + cf.cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) ^bb1: - br ^bb2(%0 : memref<2xf32>) + cf.br ^bb2(%0 : memref<2xf32>) ^bb2(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return @@ -288,13 +288,13 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) ^bb3(%5: memref<2xf32>, %6: memref<2xf32>): %7 = memref.alloc() : memref<2xf32> test.buffer_based in(%5: memref<2xf32>) out(%7: memref<2xf32>) @@ -326,13 +326,13 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) ^bb3(%5: memref<2xf32>, %6: memref<2xf32>): test.copy(%arg1, 
%arg2) : (memref<2xf32>, memref<2xf32>) return @@ -361,17 +361,17 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) + cf.cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) ^bb3(%5: memref<2xf32>): - br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) ^bb4(%6: memref<2xf32>): - br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) ^bb5(%7: memref<2xf32>, %8: memref<2xf32>): %9 = memref.alloc() : memref<2xf32> test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>) @@ -430,33 +430,33 @@ %cond: i1, %arg0: memref<2xf32>, %arg1: memref<2xf32>) { - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>) - br ^exit(%0 : memref<2xf32>) + cf.br ^exit(%0 : memref<2xf32>) ^bb2: %1 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>) - br ^exit(%1 : memref<2xf32>) + cf.br ^exit(%1 : memref<2xf32>) ^exit(%arg2: memref<2xf32>): test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br{{.*}} +// CHECK-NEXT: cf.cond_br{{.*}} // CHECK-NEXT: ^bb1 // CHECK: %[[ALLOC0:.*]] = memref.alloc() // CHECK-NEXT: test.buffer_based // CHECK-NEXT: %[[ALLOC1:.*]] = bufferization.clone %[[ALLOC0]] // CHECK-NEXT: memref.dealloc %[[ALLOC0]] -// CHECK-NEXT: br ^bb3(%[[ALLOC1]] +// CHECK-NEXT: 
cf.br ^bb3(%[[ALLOC1]] // CHECK-NEXT: ^bb2 // CHECK-NEXT: %[[ALLOC2:.*]] = memref.alloc() // CHECK-NEXT: test.buffer_based // CHECK-NEXT: %[[ALLOC3:.*]] = bufferization.clone %[[ALLOC2]] // CHECK-NEXT: memref.dealloc %[[ALLOC2]] -// CHECK-NEXT: br ^bb3(%[[ALLOC3]] +// CHECK-NEXT: cf.br ^bb3(%[[ALLOC3]] // CHECK-NEXT: ^bb3(%[[ALLOC4:.*]]:{{.*}}) // CHECK: test.copy // CHECK-NEXT: memref.dealloc %[[ALLOC4]] @@ -480,20 +480,20 @@ %arg0: memref<2xf32>, %arg1: memref<2xf32>) { %1 = memref.alloc() : memref<2xf32> - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: - br ^exit(%arg0 : memref<2xf32>) + cf.br ^exit(%arg0 : memref<2xf32>) ^bb2: test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>) memref.dealloc %1 : memref<2xf32> - br ^exit(%1 : memref<2xf32>) + cf.br ^exit(%1 : memref<2xf32>) ^exit(%arg2: memref<2xf32>): test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc() -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: test.copy // CHECK-NEXT: memref.dealloc %[[ALLOC0]] // CHECK-NEXT: return @@ -548,9 +548,9 @@ %arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) { @@ -560,13 +560,13 @@ %tmp1 = math.exp %gen1_arg0 : f32 test.region_yield %tmp1 : f32 } - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK: (%[[cond:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %{{.*}}: {{.*}}) -// CHECK-NEXT: cond_br %[[cond]], ^[[BB1:.*]], ^[[BB2:.*]] +// CHECK-NEXT: cf.cond_br %[[cond]], ^[[BB1:.*]], ^[[BB2:.*]] // CHECK: %[[ALLOC0:.*]] = bufferization.clone %[[ARG1]] // CHECK: ^[[BB2]]: // CHECK: %[[ALLOC1:.*]] = memref.alloc() @@ -728,21 +728,21 @@ 
// CHECK-LABEL: func @condBranchAlloca func @condBranchAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloca() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: %[[ALLOCA:.*]] = memref.alloca() -// CHECK: br ^bb3(%[[ALLOCA:.*]]) +// CHECK: cf.br ^bb3(%[[ALLOCA:.*]]) // CHECK-NEXT: ^bb3 // CHECK-NEXT: test.copy // CHECK-NEXT: return @@ -757,13 +757,13 @@ func @ifElseAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) ^bb3(%5: memref<2xf32>, %6: memref<2xf32>): %7 = memref.alloca() : memref<2xf32> test.buffer_based in(%5: memref<2xf32>) out(%7: memref<2xf32>) @@ -788,17 +788,17 @@ %arg2: memref<2xf32>) { %0 = memref.alloca() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - cond_br %arg0, 
^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) + cf.cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) ^bb3(%5: memref<2xf32>): - br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) ^bb4(%6: memref<2xf32>): - br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) ^bb5(%7: memref<2xf32>, %8: memref<2xf32>): %9 = memref.alloc() : memref<2xf32> test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>) @@ -821,9 +821,9 @@ %arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) { @@ -833,13 +833,13 @@ %tmp1 = math.exp %gen1_arg0 : f32 test.region_yield %tmp1 : f32 } - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK: (%[[cond:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %{{.*}}: {{.*}}) -// CHECK-NEXT: cond_br %[[cond]], ^[[BB1:.*]], ^[[BB2:.*]] +// CHECK-NEXT: cf.cond_br %[[cond]], ^[[BB1:.*]], ^[[BB2:.*]] // CHECK: ^[[BB1]]: // CHECK: %[[ALLOC0:.*]] = bufferization.clone // CHECK: ^[[BB2]]: @@ -1103,11 +1103,11 @@ %arg2: memref, %arg3: memref) { %const0 = arith.constant 0 : i32 - br ^loopHeader(%const0, %arg2 : i32, memref) + cf.br ^loopHeader(%const0, %arg2 : i32, memref) ^loopHeader(%i : i32, %buff : memref): %lessThan = arith.cmpi slt, %i, %arg1 : i32 - cond_br %lessThan, + cf.cond_br %lessThan, ^loopBody(%i, %buff : i32, memref), ^exit(%buff : memref) @@ -1116,7 +1116,7 @@ %inc = arith.addi %val, %const1 : i32 %size = arith.index_cast %inc : i32 to index %alloc1 = memref.alloc(%size) : memref - br ^loopHeader(%inc, %alloc1 : i32, memref) + cf.br ^loopHeader(%inc, %alloc1 : i32, memref) ^exit(%buff3 : 
memref): test.copy(%buff3, %arg3) : (memref, memref) @@ -1136,17 +1136,17 @@ %arg2: memref<2xf32>, %arg3: memref<2xf32>) { %const0 = arith.constant 0 : i32 - br ^loopBody(%const0, %arg2 : i32, memref<2xf32>) + cf.br ^loopBody(%const0, %arg2 : i32, memref<2xf32>) ^loopBody(%val : i32, %buff2: memref<2xf32>): %const1 = arith.constant 1 : i32 %inc = arith.addi %val, %const1 : i32 %alloc1 = memref.alloc() : memref<2xf32> - br ^loopHeader(%inc, %alloc1 : i32, memref<2xf32>) + cf.br ^loopHeader(%inc, %alloc1 : i32, memref<2xf32>) ^loopHeader(%i : i32, %buff : memref<2xf32>): %lessThan = arith.cmpi slt, %i, %arg1 : i32 - cond_br %lessThan, + cf.cond_br %lessThan, ^loopBody(%i, %buff : i32, memref<2xf32>), ^exit(%buff : memref<2xf32>) diff --git a/mlir/test/Dialect/Standard/canonicalize-cf.mlir b/mlir/test/Dialect/ControlFlow/canonicalize.mlir rename from mlir/test/Dialect/Standard/canonicalize-cf.mlir rename to mlir/test/Dialect/ControlFlow/canonicalize.mlir --- a/mlir/test/Dialect/Standard/canonicalize-cf.mlir +++ b/mlir/test/Dialect/ControlFlow/canonicalize.mlir @@ -7,7 +7,7 @@ // CHECK-NEXT: %[[CST:.*]] = arith.constant 0 : i32 // CHECK-NEXT: return %[[CST]] : i32 %c0_i32 = arith.constant 0 : i32 - br ^bb1(%c0_i32 : i32) + cf.br ^bb1(%c0_i32 : i32) ^bb1(%x : i32): return %x : i32 } @@ -21,12 +21,12 @@ ^bb1: // CHECK: ^bb1: - // CHECK-NEXT: br ^bb3(%[[ARG0]], %[[ARG1]] : i32, i32) + // CHECK-NEXT: cf.br ^bb3(%[[ARG0]], %[[ARG1]] : i32, i32) - br ^bb2(%arg0 : i32) + cf.br ^bb2(%arg0 : i32) ^bb2(%arg2 : i32): - br ^bb3(%arg2, %arg1 : i32, i32) + cf.br ^bb3(%arg2, %arg1 : i32, i32) ^bb3(%arg4 : i32, %arg5 : i32): return %arg4, %arg5 : i32, i32 @@ -40,13 +40,13 @@ %false_cond = arith.constant false %true_cond = arith.constant true - cond_br %cond, ^bb1, ^bb2(%a : i32) + cf.cond_br %cond, ^bb1, ^bb2(%a : i32) ^bb1: - cond_br %true_cond, ^bb3, ^bb2(%a : i32) + cf.cond_br %true_cond, ^bb3, ^bb2(%a : i32) ^bb2(%x : i32): - cond_br %false_cond, ^bb2(%x : i32), ^bb3 + cf.cond_br 
%false_cond, ^bb2(%x : i32), ^bb3 ^bb3: return @@ -58,7 +58,7 @@ func @cond_br_same_successor(%cond : i1, %a : i32) { // CHECK-NEXT: return - cond_br %cond, ^bb1(%a : i32), ^bb1(%a : i32) + cf.cond_br %cond, ^bb1(%a : i32), ^bb1(%a : i32) ^bb1(%result : i32): return @@ -77,7 +77,7 @@ // CHECK: %[[RES2:.*]] = arith.select %[[COND]], %[[ARG2]], %[[ARG3]] // CHECK: return %[[RES]], %[[RES2]] - cond_br %cond, ^bb1(%a, %c : i32, tensor<2xi32>), ^bb1(%b, %d : i32, tensor<2xi32>) + cf.cond_br %cond, ^bb1(%a, %c : i32, tensor<2xi32>), ^bb1(%b, %d : i32, tensor<2xi32>) ^bb1(%result : i32, %result2 : tensor<2xi32>): return %result, %result2 : i32, tensor<2xi32> @@ -91,10 +91,10 @@ %false_cond = arith.constant false %true_cond = arith.constant true - cond_br %true_cond, ^bb2, ^bb1(%a : i32) + cf.cond_br %true_cond, ^bb2, ^bb1(%a : i32) ^bb1(%x : i32): - cond_br %false_cond, ^bb1(%x : i32), ^bb2 + cf.cond_br %false_cond, ^bb1(%x : i32), ^bb2 ^bb2: return @@ -109,10 +109,10 @@ // CHECK: %[[RES2:.*]] = arith.select %[[COND]], %[[ARG1]], %[[ARG2]] // CHECK: return %[[RES]], %[[RES2]] - cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg2, %arg2 : i32, i32) + cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg2, %arg2 : i32, i32) ^bb1(%arg3: i32): - br ^bb2(%arg3, %arg1 : i32, i32) + cf.br ^bb2(%arg3, %arg1 : i32, i32) ^bb2(%arg4: i32, %arg5: i32): return %arg4, %arg5 : i32, i32 @@ -122,18 +122,18 @@ // CHECK-LABEL: func @cond_br_pass_through_fail( func @cond_br_pass_through_fail(%cond : i1) { - // CHECK: cond_br %{{.*}}, ^bb1, ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2 - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: // CHECK: ^bb1: // CHECK: "foo.op" - // CHECK: br ^bb2 + // CHECK: cf.br ^bb2 // Successors can't be collapsed if they contain other operations. "foo.op"() : () -> () - br ^bb2 + cf.br ^bb2 ^bb2: return @@ -149,9 +149,9 @@ // add predecessors for all blocks to avoid other canonicalizations. 
"foo.pred"() [^bb1, ^bb2] : () -> () ^bb1: - // CHECK-NOT: switch - // CHECK: br ^[[BB2:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] - switch %flag : i32, [ + // CHECK-NOT: cf.switch + // CHECK: cf.br ^[[BB2:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] + cf.switch %flag : i32, [ default: ^bb2(%caseOperand0 : f32) ] // CHECK: ^[[BB2]]({{.*}}): @@ -169,11 +169,11 @@ // add predecessors for all blocks to avoid other canonicalizations. "foo.pred"() [^bb1, ^bb2, ^bb3] : () -> () ^bb1: - // CHECK: switch %[[FLAG]] + // CHECK: cf.switch %[[FLAG]] // CHECK-NEXT: default: ^[[BB1:.+]](%[[CASE_OPERAND_0]] : f32) // CHECK-NEXT: 10: ^[[BB2:.+]](%[[CASE_OPERAND_1]] : f32) // CHECK-NEXT: ] - switch %flag : i32, [ + cf.switch %flag : i32, [ default: ^bb2(%caseOperand0 : f32), 42: ^bb2(%caseOperand0 : f32), 10: ^bb3(%caseOperand1 : f32), @@ -194,10 +194,10 @@ // add predecessors for all blocks to avoid other canonicalizations. "foo.pred"() [^bb1, ^bb2, ^bb3, ^bb4] : () -> () ^bb1: - // CHECK-NOT: switch - // CHECK: br ^[[BB2:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] + // CHECK-NOT: cf.switch + // CHECK: cf.br ^[[BB2:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] %c0_i32 = arith.constant 0 : i32 - switch %c0_i32 : i32, [ + cf.switch %c0_i32 : i32, [ default: ^bb2(%caseOperand0 : f32), -1: ^bb3(%caseOperand1 : f32), 1: ^bb4(%caseOperand2 : f32) @@ -220,10 +220,10 @@ // add predecessors for all blocks to avoid other canonicalizations. 
"foo.pred"() [^bb1, ^bb2, ^bb3, ^bb4] : () -> () ^bb1: - // CHECK-NOT: switch - // CHECK: br ^[[BB4:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_2]] + // CHECK-NOT: cf.switch + // CHECK: cf.br ^[[BB4:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_2]] %c0_i32 = arith.constant 1 : i32 - switch %c0_i32 : i32, [ + cf.switch %c0_i32 : i32, [ default: ^bb2(%caseOperand0 : f32), -1: ^bb3(%caseOperand1 : f32), 1: ^bb4(%caseOperand2 : f32) @@ -253,20 +253,20 @@ "foo.pred"() [^bb1, ^bb2, ^bb3, ^bb4, ^bb5, ^bb6] : () -> () ^bb1: - // CHECK: switch %[[FLAG]] + // CHECK: cf.switch %[[FLAG]] // CHECK-NEXT: default: ^[[BB5:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] // CHECK-NEXT: 43: ^[[BB6:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_1]] // CHECK-NEXT: 44: ^[[BB4:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_2]] // CHECK-NEXT: ] - switch %flag : i32, [ + cf.switch %flag : i32, [ default: ^bb2(%caseOperand0 : f32), 43: ^bb3(%caseOperand1 : f32), 44: ^bb4(%caseOperand2 : f32) ] ^bb2(%bb2Arg : f32): - br ^bb5(%bb2Arg : f32) + cf.br ^bb5(%bb2Arg : f32) ^bb3(%bb3Arg : f32): - br ^bb6(%bb3Arg : f32) + cf.br ^bb6(%bb3Arg : f32) ^bb4(%bb4Arg : f32): "foo.bb4Terminator"(%bb4Arg) : (f32) -> () @@ -290,8 +290,8 @@ "foo.pred"() [^bb1, ^bb2, ^bb4, ^bb5] : () -> () ^bb1: - // CHECK: switch %[[FLAG]] - switch %flag : i32, [ + // CHECK: cf.switch %[[FLAG]] + cf.switch %flag : i32, [ default: ^bb2, 42: ^bb3 ] @@ -301,9 +301,9 @@ ^bb3: // prevent this block from being simplified away "foo.op"() : () -> () - // CHECK-NOT: switch %[[FLAG]] - // CHECK: br ^[[BB5:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_1]] - switch %flag : i32, [ + // CHECK-NOT: cf.switch %[[FLAG]] + // CHECK: cf.br ^[[BB5:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_1]] + cf.switch %flag : i32, [ default: ^bb4(%caseOperand0 : f32), 42: ^bb5(%caseOperand1 : f32) ] @@ -327,8 +327,8 @@ "foo.pred"() [^bb1, ^bb2, ^bb4, ^bb5, ^bb6] : () -> () ^bb1: - // CHECK: switch %[[FLAG]] - switch %flag : i32, [ + // CHECK: cf.switch %[[FLAG]] + cf.switch %flag : i32, [ default: ^bb2, 42: ^bb3 ] @@ -337,9 +337,9 @@ 
"foo.bb2Terminator"() : () -> () ^bb3: "foo.op"() : () -> () - // CHECK-NOT: switch %[[FLAG]] - // CHECK: br ^[[BB4:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] - switch %flag : i32, [ + // CHECK-NOT: cf.switch %[[FLAG]] + // CHECK: cf.br ^[[BB4:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] + cf.switch %flag : i32, [ default: ^bb4(%caseOperand0 : f32), 0: ^bb5(%caseOperand1 : f32), 43: ^bb6(%caseOperand2 : f32) @@ -367,8 +367,8 @@ "foo.pred"() [^bb1, ^bb2, ^bb4, ^bb5, ^bb6] : () -> () ^bb1: - // CHECK: switch %[[FLAG]] - switch %flag : i32, [ + // CHECK: cf.switch %[[FLAG]] + cf.switch %flag : i32, [ default: ^bb3, 42: ^bb2 ] @@ -377,11 +377,11 @@ "foo.bb2Terminator"() : () -> () ^bb3: "foo.op"() : () -> () - // CHECK: switch %[[FLAG]] + // CHECK: cf.switch %[[FLAG]] // CHECK-NEXT: default: ^[[BB4:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_0]] // CHECK-NEXT: 43: ^[[BB6:[a-zA-Z0-9_]+]](%[[CASE_OPERAND_2]] // CHECK-NOT: 42 - switch %flag : i32, [ + cf.switch %flag : i32, [ default: ^bb4(%caseOperand0 : f32), 42: ^bb5(%caseOperand1 : f32), 43: ^bb6(%caseOperand2 : f32) @@ -406,14 +406,14 @@ // CHECK-LABEL: func @cond_br_from_cond_br_with_same_condition func @cond_br_from_cond_br_with_same_condition(%cond : i1) { - // CHECK: cond_br %{{.*}}, ^bb1, ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2 // CHECK: ^bb1: // CHECK: return - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: - cond_br %cond, ^bb3, ^bb2 + cf.cond_br %cond, ^bb3, ^bb2 ^bb2: "foo.terminator"() : () -> () @@ -427,19 +427,41 @@ // Erase assertion if condition is known to be true at compile time. // CHECK-LABEL: @assert_true func @assert_true() { - // CHECK-NOT: assert + // CHECK-NOT: cf.assert %true = arith.constant true - assert %true, "Computer says no" + cf.assert %true, "Computer says no" return } // ----- // Keep assertion if condition unknown at compile time. 
-// CHECK-LABEL: @assert +// CHECK-LABEL: @cf.assert // CHECK-SAME: (%[[ARG:.*]]: i1) -func @assert(%arg : i1) { - // CHECK: assert %[[ARG]], "Computer says no" - assert %arg, "Computer says no" +func @cf.assert(%arg : i1) { + // CHECK: cf.assert %[[ARG]], "Computer says no" + cf.assert %arg, "Computer says no" + return +} + +// ----- + +// CHECK-LABEL: @branchCondProp +// CHECK: %[[trueval:.+]] = arith.constant true +// CHECK: %[[falseval:.+]] = arith.constant false +// CHECK: "test.consumer1"(%[[trueval]]) : (i1) -> () +// CHECK: "test.consumer2"(%[[falseval]]) : (i1) -> () +func @branchCondProp(%arg0: i1) { + cf.cond_br %arg0, ^trueB, ^falseB + +^trueB: + "test.consumer1"(%arg0) : (i1) -> () + cf.br ^exit + +^falseB: + "test.consumer2"(%arg0) : (i1) -> () + cf.br ^exit + +^exit: return } diff --git a/mlir/test/Dialect/Standard/parser.mlir b/mlir/test/Dialect/ControlFlow/invalid.mlir rename from mlir/test/Dialect/Standard/parser.mlir rename to mlir/test/Dialect/ControlFlow/invalid.mlir --- a/mlir/test/Dialect/Standard/parser.mlir +++ b/mlir/test/Dialect/ControlFlow/invalid.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt -verify-diagnostics -split-input-file %s func @switch_missing_case_value(%flag : i32, %caseOperand : i32) { - switch %flag : i32, [ + cf.switch %flag : i32, [ default: ^bb1(%caseOperand : i32), 45: ^bb2(%caseOperand : i32), // expected-error@+1 {{expected integer value}} @@ -19,7 +19,7 @@ // ----- func @switch_wrong_type_case_value(%flag : i32, %caseOperand : i32) { - switch %flag : i32, [ + cf.switch %flag : i32, [ default: ^bb1(%caseOperand : i32), // expected-error@+1 {{expected integer value}} "hello": ^bb2(%caseOperand : i32) @@ -36,7 +36,7 @@ // ----- func @switch_missing_comma(%flag : i32, %caseOperand : i32) { - switch %flag : i32, [ + cf.switch %flag : i32, [ default: ^bb1(%caseOperand : i32), 45: ^bb2(%caseOperand : i32) // expected-error@+1 {{expected ']'}} @@ -54,7 +54,7 @@ // ----- func @switch_missing_default(%flag : i32, %caseOperand : i32) { 
- switch %flag : i32, [ + cf.switch %flag : i32, [ // expected-error@+1 {{expected 'default'}} 45: ^bb2(%caseOperand : i32) 43: ^bb3(%caseOperand : i32) diff --git a/mlir/test/Dialect/Standard/ops.mlir b/mlir/test/Dialect/ControlFlow/ops.mlir rename from mlir/test/Dialect/Standard/ops.mlir rename to mlir/test/Dialect/ControlFlow/ops.mlir --- a/mlir/test/Dialect/Standard/ops.mlir +++ b/mlir/test/Dialect/ControlFlow/ops.mlir @@ -3,25 +3,13 @@ // CHECK-LABEL: @assert func @assert(%arg : i1) { - assert %arg, "Some message in case this assertion fails." + cf.assert %arg, "Some message in case this assertion fails." return } -// CHECK-LABEL: @atan -func @atan(%arg : f32) -> f32 { - %result = math.atan %arg : f32 - return %result : f32 -} - -// CHECK-LABEL: @atan2 -func @atan2(%arg0 : f32, %arg1 : f32) -> f32 { - %result = math.atan2 %arg0, %arg1 : f32 - return %result : f32 -} - // CHECK-LABEL: func @switch( func @switch(%flag : i32, %caseOperand : i32) { - switch %flag : i32, [ + cf.switch %flag : i32, [ default: ^bb1(%caseOperand : i32), 42: ^bb2(%caseOperand : i32), 43: ^bb3(%caseOperand : i32) @@ -37,7 +25,7 @@ // CHECK-LABEL: func @switch_i64( func @switch_i64(%flag : i64, %caseOperand : i32) { - switch %flag : i64, [ + cf.switch %flag : i64, [ default: ^bb1(%caseOperand : i32), 42: ^bb2(%caseOperand : i32), 43: ^bb3(%caseOperand : i32) diff --git a/mlir/test/Dialect/GPU/all-reduce-max.mlir b/mlir/test/Dialect/GPU/all-reduce-max.mlir --- a/mlir/test/Dialect/GPU/all-reduce-max.mlir +++ b/mlir/test/Dialect/GPU/all-reduce-max.mlir @@ -39,54 +39,54 @@ // CHECK: [[VAL_31:%.*]] = arith.subi [[VAL_27]], [[VAL_29]] : i32 // CHECK: [[VAL_32:%.*]] = arith.subi [[VAL_28]], [[VAL_31]] : i32 // CHECK: [[VAL_33:%.*]] = arith.cmpi slt, [[VAL_32]], [[VAL_5]] : i32 - // CHECK: cond_br [[VAL_33]], ^bb1, ^bb17 + // CHECK: cf.cond_br [[VAL_33]], ^bb1, ^bb17 // CHECK: ^bb1: // CHECK: [[VAL_34:%.*]], [[VAL_35:%.*]] = gpu.shuffle xor [[VAL_0]], [[VAL_6]], [[VAL_32]] : f32 - // CHECK: 
cond_br [[VAL_35]], ^bb2, ^bb3 + // CHECK: cf.cond_br [[VAL_35]], ^bb2, ^bb3 // CHECK: ^bb2: // CHECK: [[VAL_36:%.*]] = arith.cmpf ugt, [[VAL_0]], [[VAL_34]] : f32 // CHECK: [[VAL_37:%.*]] = arith.select [[VAL_36]], [[VAL_0]], [[VAL_34]] : f32 - // CHECK: br ^bb4([[VAL_37]] : f32) + // CHECK: cf.br ^bb4([[VAL_37]] : f32) // CHECK: ^bb3: - // CHECK: br ^bb4([[VAL_0]] : f32) + // CHECK: cf.br ^bb4([[VAL_0]] : f32) // CHECK: ^bb4([[VAL_38:%.*]]: f32): // CHECK: [[VAL_39:%.*]], [[VAL_40:%.*]] = gpu.shuffle xor [[VAL_38]], [[VAL_7]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_40]], ^bb5, ^bb6 + // CHECK: cf.cond_br [[VAL_40]], ^bb5, ^bb6 // CHECK: ^bb5: // CHECK: [[VAL_41:%.*]] = arith.cmpf ugt, [[VAL_38]], [[VAL_39]] : f32 // CHECK: [[VAL_42:%.*]] = arith.select [[VAL_41]], [[VAL_38]], [[VAL_39]] : f32 - // CHECK: br ^bb7([[VAL_42]] : f32) + // CHECK: cf.br ^bb7([[VAL_42]] : f32) // CHECK: ^bb6: - // CHECK: br ^bb7([[VAL_38]] : f32) + // CHECK: cf.br ^bb7([[VAL_38]] : f32) // CHECK: ^bb7([[VAL_43:%.*]]: f32): // CHECK: [[VAL_44:%.*]], [[VAL_45:%.*]] = gpu.shuffle xor [[VAL_43]], [[VAL_8]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_45]], ^bb8, ^bb9 + // CHECK: cf.cond_br [[VAL_45]], ^bb8, ^bb9 // CHECK: ^bb8: // CHECK: [[VAL_46:%.*]] = arith.cmpf ugt, [[VAL_43]], [[VAL_44]] : f32 // CHECK: [[VAL_47:%.*]] = arith.select [[VAL_46]], [[VAL_43]], [[VAL_44]] : f32 - // CHECK: br ^bb10([[VAL_47]] : f32) + // CHECK: cf.br ^bb10([[VAL_47]] : f32) // CHECK: ^bb9: - // CHECK: br ^bb10([[VAL_43]] : f32) + // CHECK: cf.br ^bb10([[VAL_43]] : f32) // CHECK: ^bb10([[VAL_48:%.*]]: f32): // CHECK: [[VAL_49:%.*]], [[VAL_50:%.*]] = gpu.shuffle xor [[VAL_48]], [[VAL_9]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_50]], ^bb11, ^bb12 + // CHECK: cf.cond_br [[VAL_50]], ^bb11, ^bb12 // CHECK: ^bb11: // CHECK: [[VAL_51:%.*]] = arith.cmpf ugt, [[VAL_48]], [[VAL_49]] : f32 // CHECK: [[VAL_52:%.*]] = arith.select [[VAL_51]], [[VAL_48]], [[VAL_49]] : f32 - // CHECK: br ^bb13([[VAL_52]] : f32) + // 
CHECK: cf.br ^bb13([[VAL_52]] : f32) // CHECK: ^bb12: - // CHECK: br ^bb13([[VAL_48]] : f32) + // CHECK: cf.br ^bb13([[VAL_48]] : f32) // CHECK: ^bb13([[VAL_53:%.*]]: f32): // CHECK: [[VAL_54:%.*]], [[VAL_55:%.*]] = gpu.shuffle xor [[VAL_53]], [[VAL_10]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_55]], ^bb14, ^bb15 + // CHECK: cf.cond_br [[VAL_55]], ^bb14, ^bb15 // CHECK: ^bb14: // CHECK: [[VAL_56:%.*]] = arith.cmpf ugt, [[VAL_53]], [[VAL_54]] : f32 // CHECK: [[VAL_57:%.*]] = arith.select [[VAL_56]], [[VAL_53]], [[VAL_54]] : f32 - // CHECK: br ^bb16([[VAL_57]] : f32) + // CHECK: cf.br ^bb16([[VAL_57]] : f32) // CHECK: ^bb15: - // CHECK: br ^bb16([[VAL_53]] : f32) + // CHECK: cf.br ^bb16([[VAL_53]] : f32) // CHECK: ^bb16([[VAL_58:%.*]]: f32): - // CHECK: br ^bb18([[VAL_58]] : f32) + // CHECK: cf.br ^bb18([[VAL_58]] : f32) // CHECK: ^bb17: // CHECK: [[VAL_59:%.*]], [[VAL_60:%.*]] = gpu.shuffle xor [[VAL_0]], [[VAL_6]], [[VAL_5]] : f32 // CHECK: [[VAL_61:%.*]] = arith.cmpf ugt, [[VAL_0]], [[VAL_59]] : f32 @@ -103,74 +103,74 @@ // CHECK: [[VAL_75:%.*]], [[VAL_76:%.*]] = gpu.shuffle xor [[VAL_74]], [[VAL_10]], [[VAL_5]] : f32 // CHECK: [[VAL_77:%.*]] = arith.cmpf ugt, [[VAL_74]], [[VAL_75]] : f32 // CHECK: [[VAL_78:%.*]] = arith.select [[VAL_77]], [[VAL_74]], [[VAL_75]] : f32 - // CHECK: br ^bb18([[VAL_78]] : f32) + // CHECK: cf.br ^bb18([[VAL_78]] : f32) // CHECK: ^bb18([[VAL_79:%.*]]: f32): - // CHECK: cond_br [[VAL_30]], ^bb19, ^bb20 + // CHECK: cf.cond_br [[VAL_30]], ^bb19, ^bb20 // CHECK: ^bb19: // CHECK: [[VAL_80:%.*]] = arith.divsi [[VAL_27]], [[VAL_5]] : i32 // CHECK: [[VAL_81:%.*]] = arith.index_cast [[VAL_80]] : i32 to index // CHECK: store [[VAL_79]], [[VAL_1]]{{\[}}[[VAL_81]]] : memref<32xf32, 3> - // CHECK: br ^bb21 + // CHECK: cf.br ^bb21 // CHECK: ^bb20: - // CHECK: br ^bb21 + // CHECK: cf.br ^bb21 // CHECK: ^bb21: // CHECK: gpu.barrier // CHECK: [[VAL_82:%.*]] = arith.addi [[VAL_28]], [[VAL_2]] : i32 // CHECK: [[VAL_83:%.*]] = arith.divsi [[VAL_82]], 
[[VAL_5]] : i32 // CHECK: [[VAL_84:%.*]] = arith.cmpi slt, [[VAL_27]], [[VAL_83]] : i32 - // CHECK: cond_br [[VAL_84]], ^bb22, ^bb41 + // CHECK: cf.cond_br [[VAL_84]], ^bb22, ^bb41 // CHECK: ^bb22: // CHECK: [[VAL_85:%.*]] = arith.index_cast [[VAL_27]] : i32 to index // CHECK: [[VAL_86:%.*]] = memref.load [[VAL_1]]{{\[}}[[VAL_85]]] : memref<32xf32, 3> // CHECK: [[VAL_87:%.*]] = arith.cmpi slt, [[VAL_83]], [[VAL_5]] : i32 - // CHECK: cond_br [[VAL_87]], ^bb23, ^bb39 + // CHECK: cf.cond_br [[VAL_87]], ^bb23, ^bb39 // CHECK: ^bb23: // CHECK: [[VAL_88:%.*]], [[VAL_89:%.*]] = gpu.shuffle xor [[VAL_86]], [[VAL_6]], [[VAL_83]] : f32 - // CHECK: cond_br [[VAL_89]], ^bb24, ^bb25 + // CHECK: cf.cond_br [[VAL_89]], ^bb24, ^bb25 // CHECK: ^bb24: // CHECK: [[VAL_90:%.*]] = arith.cmpf ugt, [[VAL_86]], [[VAL_88]] : f32 // CHECK: [[VAL_91:%.*]] = arith.select [[VAL_90]], [[VAL_86]], [[VAL_88]] : f32 - // CHECK: br ^bb26([[VAL_91]] : f32) + // CHECK: cf.br ^bb26([[VAL_91]] : f32) // CHECK: ^bb25: - // CHECK: br ^bb26([[VAL_86]] : f32) + // CHECK: cf.br ^bb26([[VAL_86]] : f32) // CHECK: ^bb26([[VAL_92:%.*]]: f32): // CHECK: [[VAL_93:%.*]], [[VAL_94:%.*]] = gpu.shuffle xor [[VAL_92]], [[VAL_7]], [[VAL_83]] : f32 - // CHECK: cond_br [[VAL_94]], ^bb27, ^bb28 + // CHECK: cf.cond_br [[VAL_94]], ^bb27, ^bb28 // CHECK: ^bb27: // CHECK: [[VAL_95:%.*]] = arith.cmpf ugt, [[VAL_92]], [[VAL_93]] : f32 // CHECK: [[VAL_96:%.*]] = arith.select [[VAL_95]], [[VAL_92]], [[VAL_93]] : f32 - // CHECK: br ^bb29([[VAL_96]] : f32) + // CHECK: cf.br ^bb29([[VAL_96]] : f32) // CHECK: ^bb28: - // CHECK: br ^bb29([[VAL_92]] : f32) + // CHECK: cf.br ^bb29([[VAL_92]] : f32) // CHECK: ^bb29([[VAL_97:%.*]]: f32): // CHECK: [[VAL_98:%.*]], [[VAL_99:%.*]] = gpu.shuffle xor [[VAL_97]], [[VAL_8]], [[VAL_83]] : f32 - // CHECK: cond_br [[VAL_99]], ^bb30, ^bb31 + // CHECK: cf.cond_br [[VAL_99]], ^bb30, ^bb31 // CHECK: ^bb30: // CHECK: [[VAL_100:%.*]] = arith.cmpf ugt, [[VAL_97]], [[VAL_98]] : f32 // CHECK: 
[[VAL_101:%.*]] = arith.select [[VAL_100]], [[VAL_97]], [[VAL_98]] : f32 - // CHECK: br ^bb32([[VAL_101]] : f32) + // CHECK: cf.br ^bb32([[VAL_101]] : f32) // CHECK: ^bb31: - // CHECK: br ^bb32([[VAL_97]] : f32) + // CHECK: cf.br ^bb32([[VAL_97]] : f32) // CHECK: ^bb32([[VAL_102:%.*]]: f32): // CHECK: [[VAL_103:%.*]], [[VAL_104:%.*]] = gpu.shuffle xor [[VAL_102]], [[VAL_9]], [[VAL_83]] : f32 - // CHECK: cond_br [[VAL_104]], ^bb33, ^bb34 + // CHECK: cf.cond_br [[VAL_104]], ^bb33, ^bb34 // CHECK: ^bb33: // CHECK: [[VAL_105:%.*]] = arith.cmpf ugt, [[VAL_102]], [[VAL_103]] : f32 // CHECK: [[VAL_106:%.*]] = arith.select [[VAL_105]], [[VAL_102]], [[VAL_103]] : f32 - // CHECK: br ^bb35([[VAL_106]] : f32) + // CHECK: cf.br ^bb35([[VAL_106]] : f32) // CHECK: ^bb34: - // CHECK: br ^bb35([[VAL_102]] : f32) + // CHECK: cf.br ^bb35([[VAL_102]] : f32) // CHECK: ^bb35([[VAL_107:%.*]]: f32): // CHECK: [[VAL_108:%.*]], [[VAL_109:%.*]] = gpu.shuffle xor [[VAL_107]], [[VAL_10]], [[VAL_83]] : f32 - // CHECK: cond_br [[VAL_109]], ^bb36, ^bb37 + // CHECK: cf.cond_br [[VAL_109]], ^bb36, ^bb37 // CHECK: ^bb36: // CHECK: [[VAL_110:%.*]] = arith.cmpf ugt, [[VAL_107]], [[VAL_108]] : f32 // CHECK: [[VAL_111:%.*]] = arith.select [[VAL_110]], [[VAL_107]], [[VAL_108]] : f32 - // CHECK: br ^bb38([[VAL_111]] : f32) + // CHECK: cf.br ^bb38([[VAL_111]] : f32) // CHECK: ^bb37: - // CHECK: br ^bb38([[VAL_107]] : f32) + // CHECK: cf.br ^bb38([[VAL_107]] : f32) // CHECK: ^bb38([[VAL_112:%.*]]: f32): - // CHECK: br ^bb40([[VAL_112]] : f32) + // CHECK: cf.br ^bb40([[VAL_112]] : f32) // CHECK: ^bb39: // CHECK: [[VAL_113:%.*]], [[VAL_114:%.*]] = gpu.shuffle xor [[VAL_86]], [[VAL_6]], [[VAL_5]] : f32 // CHECK: [[VAL_115:%.*]] = arith.cmpf ugt, [[VAL_86]], [[VAL_113]] : f32 @@ -187,12 +187,12 @@ // CHECK: [[VAL_129:%.*]], [[VAL_130:%.*]] = gpu.shuffle xor [[VAL_128]], [[VAL_10]], [[VAL_5]] : f32 // CHECK: [[VAL_131:%.*]] = arith.cmpf ugt, [[VAL_128]], [[VAL_129]] : f32 // CHECK: [[VAL_132:%.*]] = arith.select 
[[VAL_131]], [[VAL_128]], [[VAL_129]] : f32 - // CHECK: br ^bb40([[VAL_132]] : f32) + // CHECK: cf.br ^bb40([[VAL_132]] : f32) // CHECK: ^bb40([[VAL_133:%.*]]: f32): // CHECK: store [[VAL_133]], [[VAL_1]]{{\[}}[[VAL_4]]] : memref<32xf32, 3> - // CHECK: br ^bb42 + // CHECK: cf.br ^bb42 // CHECK: ^bb41: - // CHECK: br ^bb42 + // CHECK: cf.br ^bb42 // CHECK: ^bb42: // CHECK: gpu.barrier %sum = gpu.all_reduce max %arg0 {} : (f32) -> (f32) diff --git a/mlir/test/Dialect/GPU/all-reduce.mlir b/mlir/test/Dialect/GPU/all-reduce.mlir --- a/mlir/test/Dialect/GPU/all-reduce.mlir +++ b/mlir/test/Dialect/GPU/all-reduce.mlir @@ -39,49 +39,49 @@ // CHECK: [[VAL_31:%.*]] = arith.subi [[VAL_27]], [[VAL_29]] : i32 // CHECK: [[VAL_32:%.*]] = arith.subi [[VAL_28]], [[VAL_31]] : i32 // CHECK: [[VAL_33:%.*]] = arith.cmpi slt, [[VAL_32]], [[VAL_5]] : i32 - // CHECK: cond_br [[VAL_33]], ^bb1, ^bb17 + // CHECK: cf.cond_br [[VAL_33]], ^bb1, ^bb17 // CHECK: ^bb1: // CHECK: [[VAL_34:%.*]], [[VAL_35:%.*]] = gpu.shuffle xor [[VAL_0]], [[VAL_6]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_35]], ^bb2, ^bb3 + // CHECK: cf.cond_br [[VAL_35]], ^bb2, ^bb3 // CHECK: ^bb2: // CHECK: [[VAL_36:%.*]] = arith.addf [[VAL_0]], [[VAL_34]] : f32 - // CHECK: br ^bb4([[VAL_36]] : f32) + // CHECK: cf.br ^bb4([[VAL_36]] : f32) // CHECK: ^bb3: - // CHECK: br ^bb4([[VAL_0]] : f32) + // CHECK: cf.br ^bb4([[VAL_0]] : f32) // CHECK: ^bb4([[VAL_37:%.*]]: f32): // CHECK: [[VAL_38:%.*]], [[VAL_39:%.*]] = gpu.shuffle xor [[VAL_37]], [[VAL_7]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_39]], ^bb5, ^bb6 + // CHECK: cf.cond_br [[VAL_39]], ^bb5, ^bb6 // CHECK: ^bb5: // CHECK: [[VAL_40:%.*]] = arith.addf [[VAL_37]], [[VAL_38]] : f32 - // CHECK: br ^bb7([[VAL_40]] : f32) + // CHECK: cf.br ^bb7([[VAL_40]] : f32) // CHECK: ^bb6: - // CHECK: br ^bb7([[VAL_37]] : f32) + // CHECK: cf.br ^bb7([[VAL_37]] : f32) // CHECK: ^bb7([[VAL_41:%.*]]: f32): // CHECK: [[VAL_42:%.*]], [[VAL_43:%.*]] = gpu.shuffle xor [[VAL_41]], [[VAL_8]], 
[[VAL_32]] : f32 - // CHECK: cond_br [[VAL_43]], ^bb8, ^bb9 + // CHECK: cf.cond_br [[VAL_43]], ^bb8, ^bb9 // CHECK: ^bb8: // CHECK: [[VAL_44:%.*]] = arith.addf [[VAL_41]], [[VAL_42]] : f32 - // CHECK: br ^bb10([[VAL_44]] : f32) + // CHECK: cf.br ^bb10([[VAL_44]] : f32) // CHECK: ^bb9: - // CHECK: br ^bb10([[VAL_41]] : f32) + // CHECK: cf.br ^bb10([[VAL_41]] : f32) // CHECK: ^bb10([[VAL_45:%.*]]: f32): // CHECK: [[VAL_46:%.*]], [[VAL_47:%.*]] = gpu.shuffle xor [[VAL_45]], [[VAL_9]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_47]], ^bb11, ^bb12 + // CHECK: cf.cond_br [[VAL_47]], ^bb11, ^bb12 // CHECK: ^bb11: // CHECK: [[VAL_48:%.*]] = arith.addf [[VAL_45]], [[VAL_46]] : f32 - // CHECK: br ^bb13([[VAL_48]] : f32) + // CHECK: cf.br ^bb13([[VAL_48]] : f32) // CHECK: ^bb12: - // CHECK: br ^bb13([[VAL_45]] : f32) + // CHECK: cf.br ^bb13([[VAL_45]] : f32) // CHECK: ^bb13([[VAL_49:%.*]]: f32): // CHECK: [[VAL_50:%.*]], [[VAL_51:%.*]] = gpu.shuffle xor [[VAL_49]], [[VAL_10]], [[VAL_32]] : f32 - // CHECK: cond_br [[VAL_51]], ^bb14, ^bb15 + // CHECK: cf.cond_br [[VAL_51]], ^bb14, ^bb15 // CHECK: ^bb14: // CHECK: [[VAL_52:%.*]] = arith.addf [[VAL_49]], [[VAL_50]] : f32 - // CHECK: br ^bb16([[VAL_52]] : f32) + // CHECK: cf.br ^bb16([[VAL_52]] : f32) // CHECK: ^bb15: - // CHECK: br ^bb16([[VAL_49]] : f32) + // CHECK: cf.br ^bb16([[VAL_49]] : f32) // CHECK: ^bb16([[VAL_53:%.*]]: f32): - // CHECK: br ^bb18([[VAL_53]] : f32) + // CHECK: cf.br ^bb18([[VAL_53]] : f32) // CHECK: ^bb17: // CHECK: [[VAL_54:%.*]], [[VAL_55:%.*]] = gpu.shuffle xor [[VAL_0]], [[VAL_6]], [[VAL_5]] : f32 // CHECK: [[VAL_56:%.*]] = arith.addf [[VAL_0]], [[VAL_54]] : f32 @@ -93,69 +93,69 @@ // CHECK: [[VAL_65:%.*]] = arith.addf [[VAL_62]], [[VAL_63]] : f32 // CHECK: [[VAL_66:%.*]], [[VAL_67:%.*]] = gpu.shuffle xor [[VAL_65]], [[VAL_10]], [[VAL_5]] : f32 // CHECK: [[VAL_68:%.*]] = arith.addf [[VAL_65]], [[VAL_66]] : f32 - // CHECK: br ^bb18([[VAL_68]] : f32) + // CHECK: cf.br ^bb18([[VAL_68]] : f32) // CHECK: 
^bb18([[VAL_69:%.*]]: f32): - // CHECK: cond_br [[VAL_30]], ^bb19, ^bb20 + // CHECK: cf.cond_br [[VAL_30]], ^bb19, ^bb20 // CHECK: ^bb19: // CHECK: [[VAL_70:%.*]] = arith.divsi [[VAL_27]], [[VAL_5]] : i32 // CHECK: [[VAL_71:%.*]] = arith.index_cast [[VAL_70]] : i32 to index // CHECK: store [[VAL_69]], [[VAL_1]]{{\[}}[[VAL_71]]] : memref<32xf32, 3> - // CHECK: br ^bb21 + // CHECK: cf.br ^bb21 // CHECK: ^bb20: - // CHECK: br ^bb21 + // CHECK: cf.br ^bb21 // CHECK: ^bb21: // CHECK: gpu.barrier // CHECK: [[VAL_72:%.*]] = arith.addi [[VAL_28]], [[VAL_2]] : i32 // CHECK: [[VAL_73:%.*]] = arith.divsi [[VAL_72]], [[VAL_5]] : i32 // CHECK: [[VAL_74:%.*]] = arith.cmpi slt, [[VAL_27]], [[VAL_73]] : i32 - // CHECK: cond_br [[VAL_74]], ^bb22, ^bb41 + // CHECK: cf.cond_br [[VAL_74]], ^bb22, ^bb41 // CHECK: ^bb22: // CHECK: [[VAL_75:%.*]] = arith.index_cast [[VAL_27]] : i32 to index // CHECK: [[VAL_76:%.*]] = memref.load [[VAL_1]]{{\[}}[[VAL_75]]] : memref<32xf32, 3> // CHECK: [[VAL_77:%.*]] = arith.cmpi slt, [[VAL_73]], [[VAL_5]] : i32 - // CHECK: cond_br [[VAL_77]], ^bb23, ^bb39 + // CHECK: cf.cond_br [[VAL_77]], ^bb23, ^bb39 // CHECK: ^bb23: // CHECK: [[VAL_78:%.*]], [[VAL_79:%.*]] = gpu.shuffle xor [[VAL_76]], [[VAL_6]], [[VAL_73]] : f32 - // CHECK: cond_br [[VAL_79]], ^bb24, ^bb25 + // CHECK: cf.cond_br [[VAL_79]], ^bb24, ^bb25 // CHECK: ^bb24: // CHECK: [[VAL_80:%.*]] = arith.addf [[VAL_76]], [[VAL_78]] : f32 - // CHECK: br ^bb26([[VAL_80]] : f32) + // CHECK: cf.br ^bb26([[VAL_80]] : f32) // CHECK: ^bb25: - // CHECK: br ^bb26([[VAL_76]] : f32) + // CHECK: cf.br ^bb26([[VAL_76]] : f32) // CHECK: ^bb26([[VAL_81:%.*]]: f32): // CHECK: [[VAL_82:%.*]], [[VAL_83:%.*]] = gpu.shuffle xor [[VAL_81]], [[VAL_7]], [[VAL_73]] : f32 - // CHECK: cond_br [[VAL_83]], ^bb27, ^bb28 + // CHECK: cf.cond_br [[VAL_83]], ^bb27, ^bb28 // CHECK: ^bb27: // CHECK: [[VAL_84:%.*]] = arith.addf [[VAL_81]], [[VAL_82]] : f32 - // CHECK: br ^bb29([[VAL_84]] : f32) + // CHECK: cf.br ^bb29([[VAL_84]] : f32) 
// CHECK: ^bb28: - // CHECK: br ^bb29([[VAL_81]] : f32) + // CHECK: cf.br ^bb29([[VAL_81]] : f32) // CHECK: ^bb29([[VAL_85:%.*]]: f32): // CHECK: [[VAL_86:%.*]], [[VAL_87:%.*]] = gpu.shuffle xor [[VAL_85]], [[VAL_8]], [[VAL_73]] : f32 - // CHECK: cond_br [[VAL_87]], ^bb30, ^bb31 + // CHECK: cf.cond_br [[VAL_87]], ^bb30, ^bb31 // CHECK: ^bb30: // CHECK: [[VAL_88:%.*]] = arith.addf [[VAL_85]], [[VAL_86]] : f32 - // CHECK: br ^bb32([[VAL_88]] : f32) + // CHECK: cf.br ^bb32([[VAL_88]] : f32) // CHECK: ^bb31: - // CHECK: br ^bb32([[VAL_85]] : f32) + // CHECK: cf.br ^bb32([[VAL_85]] : f32) // CHECK: ^bb32([[VAL_89:%.*]]: f32): // CHECK: [[VAL_90:%.*]], [[VAL_91:%.*]] = gpu.shuffle xor [[VAL_89]], [[VAL_9]], [[VAL_73]] : f32 - // CHECK: cond_br [[VAL_91]], ^bb33, ^bb34 + // CHECK: cf.cond_br [[VAL_91]], ^bb33, ^bb34 // CHECK: ^bb33: // CHECK: [[VAL_92:%.*]] = arith.addf [[VAL_89]], [[VAL_90]] : f32 - // CHECK: br ^bb35([[VAL_92]] : f32) + // CHECK: cf.br ^bb35([[VAL_92]] : f32) // CHECK: ^bb34: - // CHECK: br ^bb35([[VAL_89]] : f32) + // CHECK: cf.br ^bb35([[VAL_89]] : f32) // CHECK: ^bb35([[VAL_93:%.*]]: f32): // CHECK: [[VAL_94:%.*]], [[VAL_95:%.*]] = gpu.shuffle xor [[VAL_93]], [[VAL_10]], [[VAL_73]] : f32 - // CHECK: cond_br [[VAL_95]], ^bb36, ^bb37 + // CHECK: cf.cond_br [[VAL_95]], ^bb36, ^bb37 // CHECK: ^bb36: // CHECK: [[VAL_96:%.*]] = arith.addf [[VAL_93]], [[VAL_94]] : f32 - // CHECK: br ^bb38([[VAL_96]] : f32) + // CHECK: cf.br ^bb38([[VAL_96]] : f32) // CHECK: ^bb37: - // CHECK: br ^bb38([[VAL_93]] : f32) + // CHECK: cf.br ^bb38([[VAL_93]] : f32) // CHECK: ^bb38([[VAL_97:%.*]]: f32): - // CHECK: br ^bb40([[VAL_97]] : f32) + // CHECK: cf.br ^bb40([[VAL_97]] : f32) // CHECK: ^bb39: // CHECK: [[VAL_98:%.*]], [[VAL_99:%.*]] = gpu.shuffle xor [[VAL_76]], [[VAL_6]], [[VAL_5]] : f32 // CHECK: [[VAL_100:%.*]] = arith.addf [[VAL_76]], [[VAL_98]] : f32 @@ -167,12 +167,12 @@ // CHECK: [[VAL_109:%.*]] = arith.addf [[VAL_106]], [[VAL_107]] : f32 // CHECK: [[VAL_110:%.*]], 
[[VAL_111:%.*]] = gpu.shuffle xor [[VAL_109]], [[VAL_10]], [[VAL_5]] : f32 // CHECK: [[VAL_112:%.*]] = arith.addf [[VAL_109]], [[VAL_110]] : f32 - // CHECK: br ^bb40([[VAL_112]] : f32) + // CHECK: cf.br ^bb40([[VAL_112]] : f32) // CHECK: ^bb40([[VAL_113:%.*]]: f32): // CHECK: store [[VAL_113]], [[VAL_1]]{{\[}}[[VAL_4]]] : memref<32xf32, 3> - // CHECK: br ^bb42 + // CHECK: cf.br ^bb42 // CHECK: ^bb41: - // CHECK: br ^bb42 + // CHECK: cf.br ^bb42 // CHECK: ^bb42: // CHECK: gpu.barrier %sum = gpu.all_reduce add %arg0 {} : (f32) -> (f32) diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir --- a/mlir/test/Dialect/GPU/outlining.mlir +++ b/mlir/test/Dialect/GPU/outlining.mlir @@ -53,7 +53,7 @@ // CHECK-NEXT: %[[BDIM:.*]] = gpu.block_dim x // CHECK-NEXT: = gpu.block_dim y // CHECK-NEXT: = gpu.block_dim z -// CHECK-NEXT: br ^[[BLOCK:.*]] +// CHECK-NEXT: cf.br ^[[BLOCK:.*]] // CHECK-NEXT: ^[[BLOCK]]: // CHECK-NEXT: "use"(%[[KERNEL_ARG0]]) : (f32) -> () // CHECK-NEXT: "some_op"(%[[BID]], %[[BDIM]]) : (index, index) -> () diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir --- a/mlir/test/Dialect/Linalg/canonicalize.mlir +++ b/mlir/test/Dialect/Linalg/canonicalize.mlir @@ -208,7 +208,7 @@ %0 = tensor.dim %arg0, %c0 : tensor %1 = tensor.dim %arg0, %c1 : tensor %2 = linalg.init_tensor [%0, %1] : tensor - br ^bb1(%cst : f32) + cf.br ^bb1(%cst : f32) ^bb1(%arg1 : f32): %3 = linalg.generic @@ -234,7 +234,7 @@ %0 = tensor.dim %arg0, %c0 : tensor %1 = tensor.dim %arg0, %c1 : tensor %2 = linalg.init_tensor [%0, %1] : tensor - br ^bb1(%cst : f32) + cf.br ^bb1(%cst : f32) ^bb1(%arg2 : f32): %3:2 = linalg.generic diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir --- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir +++ 
b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir @@ -20,7 +20,7 @@ func @swappy(%cond1 : i1, %cond2 : i1, %t1 : tensor, %t2 : tensor) -> (tensor, tensor) { - cond_br %cond1, ^bb1, ^bb2 + cf.cond_br %cond1, ^bb1, ^bb2 ^bb1: %T:2 = scf.if %cond2 -> (tensor, tensor) { diff --git a/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir b/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir --- a/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir @@ -17,7 +17,7 @@ } -> tensor %4 = tensor.extract %3[] : tensor %5 = arith.trunci %4 : i8 to i1 - cond_br %5, ^bb1, ^bb2(%arg1_t : tensor) + cf.cond_br %5, ^bb1, ^bb2(%arg1_t : tensor) ^bb1: %6 = linalg.init_tensor [] : tensor %7 = linalg.generic @@ -28,7 +28,7 @@ %10 = arith.addi %arg2, %arg3 : i32 linalg.yield %10 : i32 } -> tensor - br ^bb2(%7 : tensor) + cf.br ^bb2(%7 : tensor) ^bb2(%8: tensor): return %8 : tensor } @@ -36,10 +36,10 @@ // CHECK-LABEL: func @if_true_test // CHECK-SAME: (%[[arg0:.*]]: i1, %[[arg1:.*]]: i32) // CHECK-NEXT: arith.constant 10 : i32 -// CHECK-NEXT: cond_br %[[arg0]], ^[[bb1:.*]], ^[[bb2:.*]](%[[arg1]] : i32) +// CHECK-NEXT: cf.cond_br %[[arg0]], ^[[bb1:.*]], ^[[bb2:.*]](%[[arg1]] : i32) // CHECK-NEXT: ^[[bb1]]: // CHECK-NEXT: %[[add_res:.*]] = arith.addi -// CHECK-NEXT: br ^[[bb2]](%[[add_res]] : i32) +// CHECK-NEXT: cf.br ^[[bb2]](%[[add_res]] : i32) // CHECK-NEXT: ^[[bb2]] // CHECK-NEXT: %[[func_res:.*]] = tensor.from_elements // CHECK-NEXT: return %[[func_res]] diff --git a/mlir/test/Dialect/Linalg/detensorize_if.mlir b/mlir/test/Dialect/Linalg/detensorize_if.mlir --- a/mlir/test/Dialect/Linalg/detensorize_if.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_if.mlir @@ -12,7 +12,7 @@ %0 = tensor.from_elements %c0 : tensor %c10 = arith.constant 10 : i32 %1 = tensor.from_elements %c10 : tensor - br ^bb1(%0 : tensor) + cf.br ^bb1(%0 : tensor) ^bb1(%2: tensor): // 2 preds: ^bb0, ^bb2 %3 = linalg.init_tensor [] : 
tensor @@ -24,7 +24,7 @@ linalg.yield %8 : i1 } -> tensor %5 = tensor.extract %4[] : tensor - cond_br %5, ^bb2(%2 : tensor), ^bb3(%2 : tensor) + cf.cond_br %5, ^bb2(%2 : tensor), ^bb3(%2 : tensor) ^bb2(%6: tensor): // pred: ^bb1 %7 = linalg.init_tensor [] : tensor @@ -35,7 +35,7 @@ %9 = arith.addi %arg0, %arg1 : i32 linalg.yield %9 : i32 } -> tensor - br ^bb3(%8 : tensor) + cf.br ^bb3(%8 : tensor) ^bb3(%10: tensor): // pred: ^bb1 return %10 : tensor @@ -44,13 +44,13 @@ // CHECK-LABEL: func @main() // CHECK-NEXT: arith.constant 0 // CHECK-NEXT: arith.constant 10 -// CHECK-NEXT: br ^[[bb1:.*]](%{{.*}}: i32) +// CHECK-NEXT: cf.br ^[[bb1:.*]](%{{.*}}: i32) // CHECK-NEXT: ^[[bb1]](%{{.*}}: i32): // CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}} -// CHECK-NEXT: cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32) +// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32) // CHECK-NEXT: ^[[bb2]](%{{.*}}: i32) // CHECK-NEXT: arith.addi %{{.*}}, %{{.*}} -// CHECK-NEXT: br ^[[bb3:.*]](%{{.*}} : i32) +// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32) // CHECK-NEXT: ^[[bb3]](%{{.*}}: i32) // CHECK-NEXT: tensor.from_elements %{{.*}} : tensor // CHECK-NEXT: return %{{.*}} @@ -73,7 +73,7 @@ %0 = tensor.from_elements %c0 : tensor %c10 = arith.constant 10 : i32 %1 = tensor.from_elements %c10 : tensor - br ^bb1(%0 : tensor) + cf.br ^bb1(%0 : tensor) ^bb1(%2: tensor): // 2 preds: ^bb0, ^bb2 %3 = linalg.init_tensor [] : tensor @@ -85,7 +85,7 @@ linalg.yield %8 : i1 } -> tensor %5 = tensor.extract %4[] : tensor - cond_br %5, ^bb2(%2 : tensor), ^bb3(%2 : tensor) + cf.cond_br %5, ^bb2(%2 : tensor), ^bb3(%2 : tensor) ^bb2(%6: tensor): // pred: ^bb1 %7 = linalg.init_tensor [] : tensor @@ -96,10 +96,10 @@ %9 = arith.addi %arg0, %arg1 : i32 linalg.yield %9 : i32 } -> tensor - br ^bb3(%8 : tensor) + cf.br ^bb3(%8 : tensor) ^bb3(%10: tensor): // pred: ^bb1 - br ^bb4(%10 : tensor) + cf.br ^bb4(%10 : tensor) ^bb4(%11: tensor): // pred: ^bb1 return %11 : tensor @@ 
-108,15 +108,15 @@ // CHECK-LABEL: func @main() // CHECK-NEXT: arith.constant 0 // CHECK-NEXT: arith.constant 10 -// CHECK-NEXT: br ^[[bb1:.*]](%{{.*}}: i32) +// CHECK-NEXT: cf.br ^[[bb1:.*]](%{{.*}}: i32) // CHECK-NEXT: ^[[bb1]](%{{.*}}: i32): // CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}} -// CHECK-NEXT: cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32) +// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32) // CHECK-NEXT: ^[[bb2]](%{{.*}}: i32) // CHECK-NEXT: arith.addi %{{.*}}, %{{.*}} -// CHECK-NEXT: br ^[[bb3:.*]](%{{.*}} : i32) +// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32) // CHECK-NEXT: ^[[bb3]](%{{.*}}: i32) -// CHECK-NEXT: br ^[[bb4:.*]](%{{.*}} : i32) +// CHECK-NEXT: cf.br ^[[bb4:.*]](%{{.*}} : i32) // CHECK-NEXT: ^[[bb4]](%{{.*}}: i32) // CHECK-NEXT: tensor.from_elements %{{.*}} : tensor // CHECK-NEXT: return %{{.*}} @@ -136,7 +136,7 @@ %0 = tensor.from_elements %c0 : tensor %c10 = arith.constant 10 : i32 %1 = tensor.from_elements %c10 : tensor - br ^bb1(%0 : tensor) + cf.br ^bb1(%0 : tensor) ^bb1(%2: tensor): // 2 preds: ^bb0, ^bb2 %3 = linalg.init_tensor [] : tensor @@ -148,11 +148,11 @@ linalg.yield %8 : i1 } -> tensor %5 = tensor.extract %4[] : tensor - // This cond_br intentionally has bb2 as it's target for both branches. This + // This cf.cond_br intentionally has bb2 as its target for both branches. This // is to make sure that the "forward phase" of the cost-model correctly adds // the users of a block argument (in this case bb2's argument) to the work // list.
- cond_br %5, ^bb2(%2 : tensor), ^bb2(%2 : tensor) + cf.cond_br %5, ^bb2(%2 : tensor), ^bb2(%2 : tensor) ^bb2(%6: tensor): // pred: ^bb1 %12 = tensor.from_elements %c10 : tensor @@ -164,7 +164,7 @@ %9 = arith.addi %arg0, %arg1 : i32 linalg.yield %9 : i32 } -> tensor - br ^bb3(%8 : tensor) + cf.br ^bb3(%8 : tensor) ^bb3(%10: tensor): // pred: ^bb1 return %10 : tensor @@ -173,13 +173,13 @@ // CHECK-LABEL: func @main() // CHECK-NEXT: arith.constant 0 // CHECK-NEXT: arith.constant 10 -// CHECK-NEXT: br ^[[bb1:.*]](%{{.*}}: i32) +// CHECK-NEXT: cf.br ^[[bb1:.*]](%{{.*}}: i32) // CHECK-NEXT: ^[[bb1]](%{{.*}}: i32): // CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}} -// CHECK-NEXT: cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb2(%{{.*}} : i32) +// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb2(%{{.*}} : i32) // CHECK-NEXT: ^[[bb2]](%{{.*}}: i32) // CHECK-NEXT: arith.addi %{{.*}}, %{{.*}} -// CHECK-NEXT: br ^[[bb3:.*]](%{{.*}} : i32) +// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32) // CHECK-NEXT: ^[[bb3]](%{{.*}}: i32) // CHECK-NEXT: tensor.from_elements %{{.*}} : tensor // CHECK-NEXT: return %{{.*}} diff --git a/mlir/test/Dialect/Linalg/detensorize_while.mlir b/mlir/test/Dialect/Linalg/detensorize_while.mlir --- a/mlir/test/Dialect/Linalg/detensorize_while.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_while.mlir @@ -9,7 +9,7 @@ } func @main(%farg0: tensor, %farg1: tensor) -> tensor attributes {} { - br ^bb1(%farg0 : tensor) + cf.br ^bb1(%farg0 : tensor) ^bb1(%0: tensor): // 2 preds: ^bb0, ^bb2 %1 = linalg.init_tensor [] : tensor @@ -21,7 +21,7 @@ linalg.yield %8 : i1 } -> tensor %3 = tensor.extract %2[] : tensor - cond_br %3, ^bb2(%0 : tensor), ^bb3(%0 : tensor) + cf.cond_br %3, ^bb2(%0 : tensor), ^bb3(%0 : tensor) ^bb2(%4: tensor): // pred: ^bb1 %5 = linalg.init_tensor [] : tensor @@ -32,7 +32,7 @@ %8 = arith.addi %arg0, %arg1 : i32 linalg.yield %8 : i32 } -> tensor - br ^bb1(%6 : tensor) + cf.br ^bb1(%6 : tensor) ^bb3(%7: tensor): // pred: ^bb1 return 
%7 : tensor @@ -43,13 +43,13 @@ // DET-ALL-LABEL: func @main // DET-ALL-SAME: (%{{.*}}: tensor, %{{.*}}: tensor) // DET-ALL: tensor.extract {{.*}} -// DET-ALL: br ^[[bb1:.*]](%{{.*}} : i32) +// DET-ALL: cf.br ^[[bb1:.*]](%{{.*}} : i32) // DET-ALL: ^[[bb1]](%{{.*}}: i32) // DET-ALL: arith.cmpi slt, {{.*}} -// DET-ALL: cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32) +// DET-ALL: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32) // DET-ALL: ^[[bb2]](%{{.*}}: i32) // DET-ALL: arith.addi {{.*}} -// DET-ALL: br ^[[bb1]](%{{.*}} : i32) +// DET-ALL: cf.br ^[[bb1]](%{{.*}} : i32) // DET-ALL: ^[[bb3]](%{{.*}}: i32) // DET-ALL: tensor.from_elements {{.*}} // DET-ALL: return %{{.*}} : tensor @@ -59,13 +59,13 @@ // DET-CF-LABEL: func @main // DET-CF-SAME: (%{{.*}}: tensor, %{{.*}}: tensor) // DET-CF: tensor.extract {{.*}} -// DET-CF: br ^[[bb1:.*]](%{{.*}} : i32) +// DET-CF: cf.br ^[[bb1:.*]](%{{.*}} : i32) // DET-CF: ^[[bb1]](%{{.*}}: i32) // DET-CF: arith.cmpi slt, {{.*}} -// DET-CF: cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32) +// DET-CF: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32) // DET-CF: ^[[bb2]](%{{.*}}: i32) // DET-CF: arith.addi {{.*}} -// DET-CF: br ^[[bb1]](%{{.*}} : i32) +// DET-CF: cf.br ^[[bb1]](%{{.*}} : i32) // DET-CF: ^[[bb3]](%{{.*}}: i32) // DET-CF: tensor.from_elements %{{.*}} : tensor // DET-CF: return %{{.*}} : tensor diff --git a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir --- a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir @@ -22,7 +22,7 @@ } func @main(%farg0: tensor<10xi32>, %farg1: tensor) -> tensor attributes {} { - br ^bb1(%farg0 : tensor<10xi32>) + cf.br ^bb1(%farg0 : tensor<10xi32>) ^bb1(%0: tensor<10xi32>): // 2 preds: ^bb0, ^bb2 %1 = linalg.init_tensor [] : tensor @@ -43,7 +43,7 @@ linalg.yield 
%8 : i1 } -> tensor %5 = tensor.extract %4[] : tensor - cond_br %5, ^bb2(%2 : tensor), ^bb3(%2 : tensor) + cf.cond_br %5, ^bb2(%2 : tensor), ^bb3(%2 : tensor) ^bb2(%6: tensor): // pred: ^bb1 %7 = linalg.init_tensor [10] : tensor<10xi32> @@ -54,7 +54,7 @@ linalg.yield %a : i32 } -> tensor<10xi32> - br ^bb1(%9 : tensor<10xi32>) + cf.br ^bb1(%9 : tensor<10xi32>) ^bb3(%10: tensor): // pred: ^bb1 return %10 : tensor @@ -64,7 +64,7 @@ // // DET-ALL-LABEL: func @main // DET-ALL-SAME: (%{{.*}}: tensor<10xi32>, %{{.*}}: tensor) -// DET-ALL: br ^[[bb1:.*]](%{{.*}} : tensor<10xi32>) +// DET-ALL: cf.br ^[[bb1:.*]](%{{.*}} : tensor<10xi32>) // DET-ALL: ^[[bb1]](%{{.*}}: tensor<10xi32>) // DET-ALL: linalg.init_tensor [] : tensor // DET-ALL: linalg.generic {{{.*}}} ins(%{{.*}} : tensor<10xi32>) outs(%{{.*}} : tensor) { @@ -74,7 +74,7 @@ // DET-ALL: } -> tensor // DET-ALL: tensor.extract %{{.*}}[] : tensor // DET-ALL: cmpi slt, %{{.*}}, %{{.*}} : i32 -// DET-ALL: cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32) +// DET-ALL: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32) // DET-ALL: ^[[bb2]](%{{.*}}: i32) // DET-ALL: tensor.from_elements %{{.*}} : tensor // DET-ALL: linalg.init_tensor [10] : tensor<10xi32> @@ -82,7 +82,7 @@ // DET-ALL: ^bb0(%{{.*}}: i32, %{{.*}}: i32): // DET-ALL: linalg.yield %{{.*}} : i32 // DET-ALL: } -> tensor<10xi32> -// DET-ALL: br ^[[bb1]](%{{.*}} : tensor<10xi32>) +// DET-ALL: cf.br ^[[bb1]](%{{.*}} : tensor<10xi32>) // DET-ALL: ^[[bb3]](%{{.*}}: i32) // DET-ALL: tensor.from_elements %{{.*}} : tensor // DET-ALL: return %{{.*}} : tensor @@ -90,15 +90,15 @@ // DET-CF-LABEL: func @main // DET-CF-SAME: (%{{.*}}: tensor<10xi32>, %{{.*}}: tensor) -// DET-CF: br ^[[bb1:.*]](%{{.*}} : tensor<10xi32>) +// DET-CF: cf.br ^[[bb1:.*]](%{{.*}} : tensor<10xi32>) // DET-CF: ^bb1(%{{.*}}: tensor<10xi32>) // DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor<10xi32>) outs(%{{.*}} : tensor) { // DET-CF: 
tensor.extract %{{.*}}[] : tensor // DET-CF: cmpi slt, %{{.*}}, %{{.*}} : i32 -// DET-CF: cond_br %{{.*}}, ^bb2(%{{.*}} : tensor), ^bb3(%{{.*}} : tensor) +// DET-CF: cf.cond_br %{{.*}}, ^bb2(%{{.*}} : tensor), ^bb3(%{{.*}} : tensor) // DET-CF: ^bb2(%{{.*}}: tensor) // DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor) outs(%{{.*}} : tensor<10xi32>) { -// DET-CF: br ^bb1(%{{.*}} : tensor<10xi32>) +// DET-CF: cf.br ^bb1(%{{.*}} : tensor<10xi32>) // DET-CF: ^bb3(%{{.*}}: tensor) // DET-CF: return %{{.*}} : tensor // DET-CF: } diff --git a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir --- a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir @@ -14,7 +14,7 @@ %c10 = arith.constant 10 : i32 %1 = tensor.from_elements %c10 : tensor<1xi32> %reshaped1 = tensor.collapse_shape %1 [] : tensor<1xi32> into tensor - br ^bb1(%reshaped0 : tensor) + cf.br ^bb1(%reshaped0 : tensor) ^bb1(%2: tensor): // 2 preds: ^bb0, ^bb2 %3 = linalg.init_tensor [] : tensor @@ -26,7 +26,7 @@ linalg.yield %8 : i1 } -> tensor %5 = tensor.extract %4[] : tensor - cond_br %5, ^bb2(%2 : tensor), ^bb3 + cf.cond_br %5, ^bb2(%2 : tensor), ^bb3 ^bb2(%6: tensor): // pred: ^bb1 %7 = linalg.init_tensor [] : tensor @@ -37,7 +37,7 @@ %9 = arith.addi %arg0, %arg1 : i32 linalg.yield %9 : i32 } -> tensor - br ^bb1(%8 : tensor) + cf.br ^bb1(%8 : tensor) ^bb3: // pred: ^bb1 return @@ -46,13 +46,13 @@ // CHECK-LABEL: func @main // CHECK-NEXT: arith.constant 0 : i32 // CHECK-NEXT: arith.constant 10 -// CHECK-NEXT: br ^[[bb1:.*]](%{{.*}} : i32) +// CHECK-NEXT: cf.br ^[[bb1:.*]](%{{.*}} : i32) // CHECK-NEXT: ^[[bb1]](%{{.*}}: i32) // CHECK-NEXT: %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}} -// CHECK-NEXT: cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]] +// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]] // CHECK-NEXT: ^[[bb2]](%{{.*}}: i32) // 
CHECK-NEXT: %{{.*}} = arith.addi %{{.*}}, %{{.*}} -// CHECK-NEXT: br ^[[bb1]](%{{.*}} : i32) +// CHECK-NEXT: cf.br ^[[bb1]](%{{.*}} : i32) // CHECK-NEXT: ^[[bb3]]: // CHECK-NEXT: return // CHECK-NEXT: } diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -250,7 +250,7 @@ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) { %1 = "test.payload"(%iv) : (index) -> (i32) - br ^bb1(%1: i32) + cf.br ^bb1(%1: i32) ^bb1(%arg: i32): memref.store %arg, %data1[%iv] : memref omp.yield @@ -260,13 +260,13 @@ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) { %c = "test.condition"(%iv) : (index) -> (i1) %v1 = "test.payload"(%iv) : (index) -> (i32) - cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) + cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) ^bb1(%arg0: i32): memref.store %arg0, %data1[%iv] : memref - br ^bb3 + cf.br ^bb3 ^bb2(%arg1: i32): memref.store %arg1, %data2[%iv] : memref - br ^bb3 + cf.br ^bb3 ^bb3: omp.yield } @@ -275,7 +275,7 @@ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) { %c = "test.condition"(%iv) : (index) -> (i1) %v1 = "test.payload"(%iv) : (index) -> (i32) - cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) + cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) ^bb1(%arg0: i32): memref.store %arg0, %data1[%iv] : memref omp.yield @@ -294,7 +294,7 @@ // CHECK: omp.wsloop (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) omp.wsloop (%iv1) : i32 = (%lb1) to (%ub1) step (%step1) { %1 = "test.payload"(%iv1) : (i32) -> (index) - br ^bb1(%1: index) + cf.br ^bb1(%1: index) ^bb1(%arg1: index): memref.store %iv1, %data1[%arg1] : memref omp.yield @@ -303,7 +303,7 @@ // CHECK: omp.wsloop (%{{.*}}) : i64 = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) omp.wsloop (%iv2) : i64 = (%lb2) to (%ub2) step (%step2) { %2 = "test.payload"(%iv2) : (i64) -> (index) - br ^bb1(%2: index) + cf.br 
^bb1(%2: index) ^bb1(%arg2: index): memref.store %iv2, %data2[%arg2] : memref omp.yield diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir --- a/mlir/test/Dialect/SCF/canonicalize.mlir +++ b/mlir/test/Dialect/SCF/canonicalize.mlir @@ -1127,15 +1127,15 @@ affine.for %i = 0 to 100 { "test.foo"() : () -> () %v = scf.execute_region -> i64 { - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: %c1 = arith.constant 1 : i64 - br ^bb3(%c1 : i64) + cf.br ^bb3(%c1 : i64) ^bb2: %c2 = arith.constant 2 : i64 - br ^bb3(%c2 : i64) + cf.br ^bb3(%c2 : i64) ^bb3(%x : i64): scf.yield %x : i64 @@ -1177,13 +1177,13 @@ "test.foo"() : () -> () %v = scf.execute_region -> i64 { %c = "test.cmp"() : () -> i1 - cond_br %c, ^bb2, ^bb3 + cf.cond_br %c, ^bb2, ^bb3 ^bb2: %x = "test.val1"() : () -> i64 - br ^bb4(%x : i64) + cf.br ^bb4(%x : i64) ^bb3: %y = "test.val2"() : () -> i64 - br ^bb4(%y : i64) + cf.br ^bb4(%y : i64) ^bb4(%z : i64): scf.yield %z : i64 } @@ -1194,13 +1194,13 @@ // CHECK-NOT: execute_region // CHECK: "test.foo" // CHECK: %[[cmp:.+]] = "test.cmp" -// CHECK: cond_br %[[cmp]], ^[[bb1:.+]], ^[[bb2:.+]] +// CHECK: cf.cond_br %[[cmp]], ^[[bb1:.+]], ^[[bb2:.+]] // CHECK: ^[[bb1]]: // CHECK: %[[x:.+]] = "test.val1" -// CHECK: br ^[[bb3:.+]](%[[x]] : i64) +// CHECK: cf.br ^[[bb3:.+]](%[[x]] : i64) // CHECK: ^[[bb2]]: // CHECK: %[[y:.+]] = "test.val2" -// CHECK: br ^[[bb3]](%[[y:.+]] : i64) +// CHECK: cf.br ^[[bb3]](%[[y:.+]] : i64) // CHECK: ^[[bb3]](%[[z:.+]]: i64): // CHECK: "test.bar"(%[[z]]) // CHECK: return @@ -1213,7 +1213,7 @@ "test.foo"() : () -> () %v = scf.execute_region -> i64 { %c = "test.cmp"() : () -> i1 - cond_br %c, ^bb2, ^bb3 + cf.cond_br %c, ^bb2, ^bb3 ^bb2: %x = "test.val1"() : () -> i64 scf.yield %x : i64 @@ -1228,13 +1228,13 @@ // CHECK-NOT: execute_region // CHECK: "test.foo" // CHECK: %[[cmp:.+]] = "test.cmp" -// CHECK: cond_br %[[cmp]], ^[[bb1:.+]], ^[[bb2:.+]] +// CHECK: cf.cond_br %[[cmp]], ^[[bb1:.+]], 
^[[bb2:.+]] // CHECK: ^[[bb1]]: // CHECK: %[[x:.+]] = "test.val1" -// CHECK: br ^[[bb3:.+]](%[[x]] : i64) +// CHECK: cf.br ^[[bb3:.+]](%[[x]] : i64) // CHECK: ^[[bb2]]: // CHECK: %[[y:.+]] = "test.val2" -// CHECK: br ^[[bb3]](%[[y:.+]] : i64) +// CHECK: cf.br ^[[bb3]](%[[y:.+]] : i64) // CHECK: ^[[bb3]](%[[z:.+]]: i64): // CHECK: "test.bar"(%[[z]]) // CHECK: return diff --git a/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir b/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir --- a/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir +++ b/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir @@ -113,7 +113,7 @@ // CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_9]], %[[VAL_4]] : index // CHECK: %[[VAL_12:.*]] = scf.execute_region -> i32 { // CHECK: %[[VAL_13:.*]] = arith.cmpi slt, %[[VAL_9]], %[[VAL_4]] : index -// CHECK: cond_br %[[VAL_13]], ^bb1, ^bb2 +// CHECK: cf.cond_br %[[VAL_13]], ^bb1, ^bb2 // CHECK: ^bb1: // CHECK: %[[VAL_14:.*]] = arith.subi %[[VAL_10]], %[[VAL_0]] : i32 // CHECK: scf.yield %[[VAL_14]] : i32 @@ -134,7 +134,7 @@ %0 = scf.for %i = %c0 to %arg1 step %c1 iter_args(%iarg0 = %arg0) -> i32 { %2 = scf.execute_region -> i32 { %1 = arith.cmpi slt, %i, %c1 : index - cond_br %1, ^bb1, ^bb2 + cf.cond_br %1, ^bb1, ^bb2 ^bb1: %2 = arith.subi %iarg0, %arg0 : i32 scf.yield %2 : i32 diff --git a/mlir/test/Dialect/SCF/ops.mlir b/mlir/test/Dialect/SCF/ops.mlir --- a/mlir/test/Dialect/SCF/ops.mlir +++ b/mlir/test/Dialect/SCF/ops.mlir @@ -298,13 +298,13 @@ } // CHECK: scf.execute_region { - // CHECK-NEXT: br ^bb1 + // CHECK-NEXT: cf.br ^bb1 // CHECK-NEXT: ^bb1: // CHECK-NEXT: scf.yield // CHECK-NEXT: } "scf.execute_region"() ({ ^bb0: - br ^bb1 + cf.br ^bb1 ^bb1: scf.yield }) : () -> () diff --git a/mlir/test/Dialect/Standard/canonicalize.mlir b/mlir/test/Dialect/Standard/canonicalize.mlir --- a/mlir/test/Dialect/Standard/canonicalize.mlir +++ b/mlir/test/Dialect/Standard/canonicalize.mlir @@ -64,28 +64,6 @@ // ----- -// CHECK-LABEL: @branchCondProp -// CHECK: 
%[[trueval:.+]] = arith.constant true -// CHECK: %[[falseval:.+]] = arith.constant false -// CHECK: "test.consumer1"(%[[trueval]]) : (i1) -> () -// CHECK: "test.consumer2"(%[[falseval]]) : (i1) -> () -func @branchCondProp(%arg0: i1) { - cond_br %arg0, ^trueB, ^falseB - -^trueB: - "test.consumer1"(%arg0) : (i1) -> () - br ^exit - -^falseB: - "test.consumer2"(%arg0) : (i1) -> () - br ^exit - -^exit: - return -} - -// ----- - // CHECK-LABEL: @selToNot // CHECK: %[[trueval:.+]] = arith.constant true // CHECK: %[[res:.+]] = arith.xori %arg0, %[[trueval]] : i1 diff --git a/mlir/test/Dialect/Standard/func-bufferize.mlir b/mlir/test/Dialect/Standard/func-bufferize.mlir --- a/mlir/test/Dialect/Standard/func-bufferize.mlir +++ b/mlir/test/Dialect/Standard/func-bufferize.mlir @@ -9,11 +9,11 @@ // CHECK-LABEL: func @block_arguments( // CHECK-SAME: %[[ARG:.*]]: memref) -> memref { -// CHECK: br ^bb1(%[[ARG]] : memref) +// CHECK: cf.br ^bb1(%[[ARG]] : memref) // CHECK: ^bb1(%[[BBARG:.*]]: memref): // CHECK: return %[[BBARG]] : memref func @block_arguments(%arg0: tensor) -> tensor { - br ^bb1(%arg0: tensor) + cf.br ^bb1(%arg0: tensor) ^bb1(%bbarg: tensor): return %bbarg : tensor } @@ -52,7 +52,7 @@ // update all terminators and issue an error if that is not possible. 
func @unable_to_update_terminator(%arg0: tensor) -> tensor { %0 = arith.constant true - cond_br %0, ^bb1(%arg0: tensor), ^bb2(%arg0: tensor) + cf.cond_br %0, ^bb1(%arg0: tensor), ^bb2(%arg0: tensor) ^bb1(%bbarg0: tensor): // expected-error @+1 {{failed to legalize operation 'test.terminator'}} "test.terminator"() : () -> () diff --git a/mlir/test/IR/invalid.mlir b/mlir/test/IR/invalid.mlir --- a/mlir/test/IR/invalid.mlir +++ b/mlir/test/IR/invalid.mlir @@ -111,7 +111,7 @@ func @bad_branch() { ^bb12: - br ^missing // expected-error {{reference to an undefined block}} + cf.br ^missing // expected-error {{reference to an undefined block}} } // ----- @@ -158,7 +158,7 @@ func @block_arg_no_close_paren() { ^bb42: - br ^bb2( // expected-error@+1 {{expected ':'}} + cf.br ^bb2( // expected-error@+1 {{expected ':'}} return } @@ -167,9 +167,9 @@ func @block_first_has_predecessor() { // expected-error@-1 {{entry block of region may not have predecessors}} ^bb42: - br ^bb43 + cf.br ^bb43 ^bb43: - br ^bb42 + cf.br ^bb42 } // ----- @@ -182,7 +182,7 @@ // ----- func @no_terminator() { - br ^bb1 + cf.br ^bb1 ^bb1: %x = arith.constant 0 : i32 %y = arith.constant 1 : i32 // expected-error {{block with no terminator}} @@ -368,7 +368,7 @@ func @argError() { ^bb1(%a: i64): // expected-note {{previously defined here}} - br ^bb2 + cf.br ^bb2 ^bb2(%a: i64): // expected-error{{redefinition of SSA value '%a'}} return } @@ -379,7 +379,7 @@ ^bb0: %0:2 = "foo"() : () -> (i1, i17) // expected-error @+1 {{branch has 2 operands for successor #0, but target block has 1}} - br ^bb1(%0#1, %0#0 : i17, i1) + cf.br ^bb1(%0#1, %0#0 : i17, i1) ^bb1(%x: i17): return @@ -391,7 +391,7 @@ ^bb0: %0 = "getBool"() : () -> i1 // expected-error @+1 {{type mismatch for bb argument #0 of successor #0}} - br ^bb1(%0 : i1) + cf.br ^bb1(%0 : i1) ^bb1(%x: i32): return @@ -409,7 +409,7 @@ func @condbr_notbool() { ^bb0: %a = "foo"() : () -> i32 // expected-note {{prior use here}} - cond_br %a, ^bb0, ^bb0 // expected-error 
{{use of value '%a' expects different type than prior uses: 'i1' vs 'i32'}} + cf.cond_br %a, ^bb0, ^bb0 // expected-error {{use of value '%a' expects different type than prior uses: 'i1' vs 'i32'}} } // ----- @@ -418,7 +418,7 @@ ^bb0: %c = "foo"() : () -> i1 %a = "foo"() : () -> i32 - cond_br %c, ^bb0(%a, %a : i32, ^bb0) // expected-error {{expected non-function type}} + cf.cond_br %c, ^bb0(%a, %a : i32, ^bb0) // expected-error {{expected non-function type}} } // ----- @@ -427,7 +427,7 @@ ^bb0: %c = "foo"() : () -> i1 %a = "foo"() : () -> i32 - cond_br %c, ^bb0(%a, %a : i32, i32), i32 // expected-error {{expected block name}} + cf.cond_br %c, ^bb0(%a, %a : i32, i32), i32 // expected-error {{expected block name}} } // ----- @@ -477,7 +477,7 @@ func @dominance_failure() { ^bb0: "foo"(%x) : (i32) -> () // expected-error {{operand #0 does not dominate this use}} - br ^bb1 + cf.br ^bb1 ^bb1: %x = "bar"() : () -> i32 // expected-note {{operand defined here (op in the same region)}} return @@ -489,7 +489,7 @@ ^bb0: "foo"(%x) : (i32) -> () // expected-error {{operand #0 does not dominate this use}} %x = "bar"() : () -> i32 // expected-note {{operand defined here (op in the same block)}} - br ^bb1 + cf.br ^bb1 ^bb1: return } @@ -508,7 +508,7 @@ func @dominance_failure() { // expected-note {{operand defined as a block argument (block #1 in the same region)}} ^bb0: - br ^bb1(%x : i32) // expected-error {{operand #0 does not dominate this use}} + cf.br ^bb1(%x : i32) // expected-error {{operand #0 does not dominate this use}} ^bb1(%x : i32): return } @@ -520,7 +520,7 @@ %f = "foo"() ({ "foo"(%x) : (i32) -> () // expected-error {{operand #0 does not dominate this use}} }) : () -> (i32) - br ^bb1(%f : i32) + cf.br ^bb1(%f : i32) ^bb1(%x : i32): return } @@ -988,7 +988,7 @@ "test.ssacfg_region"() ({ // expected-error @+1 {{operand #0 does not dominate this use}} "foo.use" (%1) : (i32) -> () - br ^bb2 + cf.br ^bb2 ^bb2: // expected-note @+1 {{operand defined here}} @@ -1588,7 
+1588,7 @@ // ----- func @forward_reference_type_check() -> (i8) { - br ^bb2 + cf.br ^bb2 ^bb1: // expected-note @+1 {{previously used here with type 'i8'}} @@ -1597,7 +1597,7 @@ ^bb2: // expected-error @+1 {{definition of SSA value '%1#0' has type 'f32'}} %1 = "bar"() : () -> (f32) - br ^bb1 + cf.br ^bb1 } // ----- @@ -1610,9 +1610,9 @@ ^bb1: // expected-error @+1 {{operand #0 does not dominate this use}} %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) - br ^bb4 + cf.br ^bb4 ^bb2: - br ^bb2 + cf.br ^bb2 ^bb4: %1 = "foo"() : ()->i64 // expected-note {{operand defined here}} }) : () -> () diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir --- a/mlir/test/IR/parser.mlir +++ b/mlir/test/IR/parser.mlir @@ -185,9 +185,9 @@ func @multiblock() { return // CHECK: return ^bb1: // CHECK: ^bb1: // no predecessors - br ^bb4 // CHECK: br ^bb3 + cf.br ^bb4 // CHECK: cf.br ^bb3 ^bb2: // CHECK: ^bb2: // pred: ^bb2 - br ^bb2 // CHECK: br ^bb2 + cf.br ^bb2 // CHECK: cf.br ^bb2 ^bb4: // CHECK: ^bb3: // pred: ^bb1 return // CHECK: return } // CHECK: } @@ -416,7 +416,7 @@ func @ssa_values() -> (i16, i8) { // CHECK: %{{.*}}:2 = "foo"() : () -> (i1, i17) %0:2 = "foo"() : () -> (i1, i17) - br ^bb2 + cf.br ^bb2 ^bb1: // CHECK: ^bb1: // pred: ^bb2 // CHECK: %{{.*}}:2 = "baz"(%{{.*}}#1, %{{.*}}#0, %{{.*}}#1) : (f32, i11, i17) -> (i16, i8) @@ -428,14 +428,14 @@ ^bb2: // CHECK: ^bb2: // pred: ^bb0 // CHECK: %{{.*}}:2 = "bar"(%{{.*}}#0, %{{.*}}#1) : (i1, i17) -> (i11, f32) %2:2 = "bar"(%0#0, %0#1) : (i1, i17) -> (i11, f32) - br ^bb1 + cf.br ^bb1 } // CHECK-LABEL: func @bbargs() -> (i16, i8) { func @bbargs() -> (i16, i8) { // CHECK: %{{.*}}:2 = "foo"() : () -> (i1, i17) %0:2 = "foo"() : () -> (i1, i17) - br ^bb1(%0#1, %0#0 : i17, i1) + cf.br ^bb1(%0#1, %0#0 : i17, i1) ^bb1(%x: i17, %y: i1): // CHECK: ^bb1(%{{.*}}: i17, %{{.*}}: i1): // CHECK: %{{.*}}:2 = "baz"(%{{.*}}, %{{.*}}, %{{.*}}#1) : (i17, i1, i17) -> (i16, i8) @@ -446,12 +446,12 @@ // CHECK-LABEL: func @verbose_terminators() -> (i1, 
i17) func @verbose_terminators() -> (i1, i17) { %0:2 = "foo"() : () -> (i1, i17) -// CHECK: br ^bb1(%{{.*}}#0, %{{.*}}#1 : i1, i17) - "std.br"(%0#0, %0#1)[^bb1] : (i1, i17) -> () +// CHECK: cf.br ^bb1(%{{.*}}#0, %{{.*}}#1 : i1, i17) + "cf.br"(%0#0, %0#1)[^bb1] : (i1, i17) -> () ^bb1(%x : i1, %y : i17): -// CHECK: cond_br %{{.*}}, ^bb2(%{{.*}} : i17), ^bb3(%{{.*}}, %{{.*}} : i1, i17) - "std.cond_br"(%x, %y, %x, %y) [^bb2, ^bb3] {operand_segment_sizes = dense<[1, 1, 2]>: vector<3xi32>} : (i1, i17, i1, i17) -> () +// CHECK: cf.cond_br %{{.*}}, ^bb2(%{{.*}} : i17), ^bb3(%{{.*}}, %{{.*}} : i1, i17) + "cf.cond_br"(%x, %y, %x, %y) [^bb2, ^bb3] {operand_segment_sizes = dense<[1, 1, 2]>: vector<3xi32>} : (i1, i17, i1, i17) -> () ^bb2(%a : i17): %true = arith.constant true @@ -468,12 +468,12 @@ %cond = "foo"() : () -> i1 %a = "bar"() : () -> i32 %b = "bar"() : () -> i64 - // CHECK: cond_br %{{.*}}, ^bb1(%{{.*}} : i32), ^bb2(%{{.*}} : i64) - cond_br %cond, ^bb1(%a : i32), ^bb2(%b : i64) + // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}} : i32), ^bb2(%{{.*}} : i64) + cf.cond_br %cond, ^bb1(%a : i32), ^bb2(%b : i64) // CHECK: ^bb1({{.*}}: i32): // pred: ^bb0 ^bb1(%x : i32): - br ^bb2(%b: i64) + cf.br ^bb2(%b: i64) // CHECK: ^bb2({{.*}}: i64): // 2 preds: ^bb0, ^bb1 ^bb2(%y : i64): @@ -486,8 +486,8 @@ %cond = "foo"() : () -> i1 %a = "bar"() : () -> i32 %b = "bar"() : () -> i64 - // CHECK: cond_br %{{.*}}, ^bb1(%{{.*}}, %{{.*}} : i32, i64), ^bb2(%{{.*}}, %{{.*}}, %{{.*}} : i64, i32, i32) - cond_br %cond, ^bb1(%a, %b : i32, i64), ^bb2(%b, %a, %a : i64, i32, i32) + // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}, %{{.*}} : i32, i64), ^bb2(%{{.*}}, %{{.*}}, %{{.*}} : i64, i32, i32) + cf.cond_br %cond, ^bb1(%a, %b : i32, i64), ^bb2(%b, %a, %a : i64, i32, i32) ^bb1(%x : i32, %y : i64): return %x : i32 @@ -1279,15 +1279,14 @@ // TODO: remove this after removing the special casing for std in the printer. // Verify that operations in the standard dialect keep the `std.` prefix. 
- // CHECK: std.assert - assert %bool, "Assertion" + // CHECK: cf.assert + cf.assert %bool, "Assertion" "test.terminator"() : ()->() } // The same operation outside of the region does not have an std. prefix. // CHECK-NOT: std.assert - // CHECK: assert - assert %bool, "Assertion" - return + // CHECK: return + std.return } // CHECK-LABEL: func @unreachable_dominance_violation_ok @@ -1296,9 +1295,9 @@ // CHECK: return [[VAL]] : i1 // CHECK: ^bb1: // no predecessors // CHECK: [[VAL2:%.*]]:3 = "bar"([[VAL3:%.*]]) : (i64) -> (i1, i1, i1) -// CHECK: br ^bb3 +// CHECK: cf.br ^bb3 // CHECK: ^bb2: // pred: ^bb2 -// CHECK: br ^bb2 +// CHECK: cf.br ^bb2 // CHECK: ^bb3: // pred: ^bb1 // CHECK: [[VAL3]] = "foo"() : () -> i64 // CHECK: return [[VAL2]]#1 : i1 @@ -1308,9 +1307,9 @@ ^bb1: // %1 is not dominated by it's definition, but block is not reachable. %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) - br ^bb3 + cf.br ^bb3 ^bb2: - br ^bb2 + cf.br ^bb2 ^bb3: %1 = "foo"() : ()->i64 return %2#1 : i1 @@ -1318,28 +1317,28 @@ // CHECK-LABEL: func @graph_region_in_hierarchy_ok func @graph_region_in_hierarchy_ok() -> i64 { -// CHECK: br ^bb2 +// CHECK: cf.br ^bb2 // CHECK: ^bb1: // CHECK: test.graph_region { // CHECK: [[VAL2:%.*]]:3 = "bar"([[VAL3:%.*]]) : (i64) -> (i1, i1, i1) // CHECK: } -// CHECK: br ^bb3 +// CHECK: cf.br ^bb3 // CHECK: ^bb2: // pred: ^bb0 // CHECK: [[VAL3]] = "foo"() : () -> i64 -// CHECK: br ^bb1 +// CHECK: cf.br ^bb1 // CHECK: ^bb3: // pred: ^bb1 // CHECK: return [[VAL3]] : i64 // CHECK: } - br ^bb2 + cf.br ^bb2 ^bb1: test.graph_region { // %1 is well-defined here, since bb2 dominates bb1. 
%2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) } - br ^bb4 + cf.br ^bb4 ^bb2: %1 = "foo"() : ()->i64 - br ^bb1 + cf.br ^bb1 ^bb4: return %1 : i64 } diff --git a/mlir/test/IR/region.mlir b/mlir/test/IR/region.mlir --- a/mlir/test/IR/region.mlir +++ b/mlir/test/IR/region.mlir @@ -46,13 +46,13 @@ "test.sized_region_op"() ( { "work"() : () -> () - br ^next1 + cf.br ^next1 ^next1: "work"() : () -> () }, { "work"() : () -> () - br ^next2 + cf.br ^next2 ^next2: "work"() : () -> () }) : () -> () diff --git a/mlir/test/IR/traits.mlir b/mlir/test/IR/traits.mlir --- a/mlir/test/IR/traits.mlir +++ b/mlir/test/IR/traits.mlir @@ -529,9 +529,9 @@ ^bb1: // expected-error @+1 {{operand #0 does not dominate this use}} %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) - br ^bb4 + cf.br ^bb4 ^bb2: - br ^bb2 + cf.br ^bb2 ^bb4: %1 = "foo"() : ()->i64 // expected-note {{operand defined here}} return %2#1 : i1 @@ -559,7 +559,7 @@ test.graph_region { // expected-error@-1 {{'test.graph_region' op expects graph region #0 to have 0 or 1 blocks}} ^bb42: - br ^bb43 + cf.br ^bb43 ^bb43: "terminator"() : () -> () } diff --git a/mlir/test/IR/visitors.mlir b/mlir/test/IR/visitors.mlir --- a/mlir/test/IR/visitors.mlir +++ b/mlir/test/IR/visitors.mlir @@ -116,10 +116,10 @@ "regionOp0"() ({ ^bb0: "op0"() : () -> () - br ^bb2 + cf.br ^bb2 ^bb1: "op1"() : () -> () - br ^bb2 + cf.br ^bb2 ^bb2: "op2"() : () -> () }) : () -> () @@ -131,9 +131,9 @@ // CHECK: Visiting op 'builtin.func' // CHECK: Visiting op 'regionOp0' // CHECK: Visiting op 'op0' -// CHECK: Visiting op 'std.br' +// CHECK: Visiting op 'cf.br' // CHECK: Visiting op 'op1' -// CHECK: Visiting op 'std.br' +// CHECK: Visiting op 'cf.br' // CHECK: Visiting op 'op2' // CHECK: Visiting op 'std.return' @@ -151,9 +151,9 @@ // CHECK-LABEL: Op post-order visits // CHECK: Visiting op 'op0' -// CHECK: Visiting op 'std.br' +// CHECK: Visiting op 'cf.br' // CHECK: Visiting op 'op1' -// CHECK: Visiting op 'std.br' +// CHECK: Visiting op 'cf.br' // CHECK: Visiting op 'op2' // 
CHECK: Visiting op 'regionOp0' // CHECK: Visiting op 'std.return' @@ -183,9 +183,9 @@ // CHECK-LABEL: Op post-order erasures (skip) // CHECK: Erasing op 'op0' -// CHECK: Erasing op 'std.br' +// CHECK: Erasing op 'cf.br' // CHECK: Erasing op 'op1' -// CHECK: Erasing op 'std.br' +// CHECK: Erasing op 'cf.br' // CHECK: Erasing op 'op2' // CHECK: Erasing op 'regionOp0' // CHECK: Erasing op 'std.return' @@ -197,9 +197,9 @@ // CHECK-LABEL: Op post-order erasures (no skip) // CHECK: Erasing op 'op0' -// CHECK: Erasing op 'std.br' +// CHECK: Erasing op 'cf.br' // CHECK: Erasing op 'op1' -// CHECK: Erasing op 'std.br' +// CHECK: Erasing op 'cf.br' // CHECK: Erasing op 'op2' // CHECK: Erasing op 'regionOp0' // CHECK: Erasing op 'std.return' diff --git a/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir b/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir @@ -5,7 +5,7 @@ // RUN: -async-runtime-ref-counting \ // RUN: -async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -arith-expand \ // RUN: -memref-expand \ // RUN: -convert-vector-to-llvm \ @@ -21,7 +21,7 @@ // RUN: mlir-opt %s \ // RUN: -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-vector-to-llvm \ // RUN: -convert-memref-to-llvm \ // RUN: -convert-std-to-llvm \ diff --git a/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir b/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir @@ -5,7 +5,7 @@ // RUN: 
-async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ // RUN: -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -arith-expand \ // RUN: -memref-expand \ // RUN: -convert-vector-to-llvm \ @@ -26,7 +26,7 @@ // RUN: -async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ // RUN: -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -arith-expand \ // RUN: -memref-expand \ // RUN: -convert-vector-to-llvm \ @@ -42,7 +42,7 @@ // RUN: mlir-opt %s \ // RUN: -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-vector-to-llvm \ // RUN: -convert-memref-to-llvm \ // RUN: -convert-std-to-llvm \ diff --git a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir @@ -3,7 +3,7 @@ // RUN: -async-runtime-ref-counting \ // RUN: -async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-memref-to-llvm \ // RUN: -arith-expand \ // RUN: -memref-expand \ @@ -19,7 +19,7 @@ // RUN: -async-to-async-runtime \ // RUN: -async-runtime-policy-based-ref-counting \ // RUN: -convert-async-to-llvm \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-memref-to-llvm \ // RUN: -arith-expand \ // RUN: -memref-expand \ @@ -38,7 +38,7 @@ // RUN: -async-runtime-ref-counting \ // RUN: -async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-memref-to-llvm \ // RUN: -arith-expand \ // RUN: -memref-expand \ @@ -125,7 +125,7 @@ scf.parallel (%i) = (%lb1) to (%ub1) step (%c1) { %false = arith.constant 0 : i1 - 
assert %false, "should never be executed" + cf.assert %false, "should never be executed" } memref.dealloc %A : memref<9xf32> diff --git a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir @@ -4,7 +4,7 @@ // RUN: -async-runtime-ref-counting-opt \ // RUN: -arith-expand \ // RUN: -convert-async-to-llvm \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-memref-to-llvm \ // RUN: -convert-std-to-llvm \ // RUN: -reconcile-unrealized-casts \ @@ -19,7 +19,7 @@ // RUN: -async-runtime-policy-based-ref-counting \ // RUN: -arith-expand \ // RUN: -convert-async-to-llvm \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-memref-to-llvm \ // RUN: -convert-std-to-llvm \ // RUN: -reconcile-unrealized-casts \ @@ -37,7 +37,7 @@ // RUN: -async-runtime-ref-counting-opt \ // RUN: -arith-expand \ // RUN: -convert-async-to-llvm \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-memref-to-llvm \ // RUN: -convert-std-to-llvm \ // RUN: -reconcile-unrealized-casts \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir @@ -5,7 +5,7 @@ // RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=memref.copy register-tile-sizes=4,32 vectorize" | \ // RUN: mlir-opt -canonicalize -convert-vector-to-scf -lower-affine -convert-linalg-to-loops | \ -// RUN: mlir-opt -canonicalize -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt -canonicalize 
-convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // Activate to dump assembly // R_UN: -dump-object-file -object-filename=/tmp/a.o \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s @@ -56,7 +56,7 @@ %e1 = memref.load %C1[%i, %j] : memref %e2 = memref.load %C2[%i, %j] : memref %c = arith.cmpf oeq, %e1, %e2 : f32 - assert %c, "Matmul does not produce same output as matvec" + cf.assert %c, "Matmul does not produce same output as matvec" } } %C2_ = memref.cast %C2 : memref to memref<*xf32> diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm 
-reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -canonicalize -cse -linalg-comprehensive-module-bufferize |\ // RUN: mlir-opt -buffer-deallocation -convert-vector-to-scf -lower-affine -convert-linalg-to-loops |\ -// RUN: mlir-opt -canonicalize -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt -canonicalize -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext |\ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // 
RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -// RUN: mlir-opt %s -linalg-tile="tile-sizes=4" -convert-linalg-to-loops -convert-scf-to-std \ -// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -linalg-tile="tile-sizes=4" -convert-linalg-to-loops -convert-scf-to-cf \ +// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,4" -convert-linalg-to-loops -convert-scf-to-std \ -// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,4" -convert-linalg-to-loops -convert-scf-to-cf \ +// RUN: 
-convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,2" -convert-linalg-to-loops -convert-scf-to-std \ -// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,2" -convert-linalg-to-loops -convert-scf-to-cf \ +// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir --- 
a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,3,3,2" -convert-linalg-to-loops -convert-scf-to-std \ -// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,3,3,2" -convert-linalg-to-loops -convert-scf-to-cf \ +// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -lower-affine 
-convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,2,2" -convert-linalg-to-loops -convert-scf-to-std \ -// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,2,2" -convert-linalg-to-loops -convert-scf-to-cf \ +// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -// RUN: mlir-opt %s -linalg-tile="tile-sizes=0,5,5,5" -convert-linalg-to-loops -convert-scf-to-std \ -// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std 
--convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -linalg-tile="tile-sizes=0,5,5,5" -convert-linalg-to-loops -convert-scf-to-cf \ +// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -linalg-bufferize \ // RUN: -arith-bufferize -tensor-bufferize -func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation \ -// RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -linalg-bufferize \ // RUN: -arith-bufferize -tensor-bufferize -func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation \ -// RUN: 
-convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -linalg-bufferize \ // RUN: -arith-bufferize -tensor-bufferize -func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation \ -// RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir @@ -1,7 +1,7 @@ // UNSUPPORTED: asan // RUN: mlir-opt %s -linalg-bufferize -arith-bufferize \ -// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops -convert-scf-to-std \ -// RUN: 
-convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops -convert-scf-to-cf \ +// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s @@ -9,8 +9,8 @@ // RUN: mlir-opt %s -linalg-tile="tile-sizes=1,2,3" -linalg-bufferize \ // RUN: -scf-bufferize -arith-bufferize -tensor-bufferize \ // RUN: -func-bufferize \ -// RUN: -finalizing-bufferize -convert-linalg-to-loops -convert-scf-to-std -convert-scf-to-std \ -// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: -finalizing-bufferize -convert-linalg-to-loops -convert-scf-to-cf -convert-scf-to-cf \ +// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-cf --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/Integration/Dialect/Memref/memref_abi.c b/mlir/test/Integration/Dialect/Memref/memref_abi.c --- a/mlir/test/Integration/Dialect/Memref/memref_abi.c +++ b/mlir/test/Integration/Dialect/Memref/memref_abi.c @@ -3,7 +3,7 @@ // Compile the MLIR file to LLVM: // RUN: mlir-opt %t/input.mlir \ -// RUN: -lower-affine -convert-scf-to-std -convert-memref-to-llvm \ +// RUN: -lower-affine -convert-scf-to-cf -convert-memref-to-llvm \ // RUN: -convert-std-to-llvm 
-reconcile-unrealized-casts \ // RUN: | mlir-translate --mlir-to-llvmir -o %t.ll diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py @@ -127,7 +127,7 @@ f'sparsification{{{options}}},' f'sparse-tensor-conversion,' f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' - f'convert-scf-to-std,' + f'convert-scf-to-cf,' f'func-bufferize,' f'arith-bufferize,' f'builtin.func(tensor-bufferize,finalizing-bufferize),' diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py @@ -117,7 +117,7 @@ f'sparsification{{{options}}},' f'sparse-tensor-conversion,' f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' - f'convert-scf-to-std,' + f'convert-scf-to-cf,' f'func-bufferize,' f'arith-bufferize,' f'builtin.func(tensor-bufferize,finalizing-bufferize),' diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py @@ -69,7 +69,7 @@ f'sparsification,' f'sparse-tensor-conversion,' f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' - f'convert-scf-to-std,' + f'convert-scf-to-cf,' f'func-bufferize,' f'arith-bufferize,' f'builtin.func(tensor-bufferize,finalizing-bufferize),' diff --git 
a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py @@ -77,7 +77,7 @@ f'sparsification,' f'sparse-tensor-conversion,' f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' - f'convert-scf-to-std,' + f'convert-scf-to-cf,' f'func-bufferize,' f'arith-bufferize,' f'builtin.func(tensor-bufferize,finalizing-bufferize),' diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py @@ -175,7 +175,7 @@ f'sparsification{{{sparsification_options}}},' f'sparse-tensor-conversion,' f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' - f'convert-scf-to-std,' + f'convert-scf-to-cf,' f'func-bufferize,' f'arith-bufferize,' f'builtin.func(tensor-bufferize,finalizing-bufferize),' diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py +++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py @@ -158,7 +158,7 @@ f"sparsification," f"sparse-tensor-conversion," f"builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf)," - f"convert-scf-to-std," + f"convert-scf-to-cf," f"func-bufferize," f"arith-bufferize," f"builtin.func(tensor-bufferize,finalizing-bufferize)," diff --git a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir --- 
a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir +++ b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf \ // RUN: -memref-expand -arith-expand -convert-vector-to-llvm \ // RUN: -convert-memref-to-llvm -convert-std-to-llvm \ // RUN: -reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf-full.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf-full.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf-full.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf-full.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf \ // RUN: -arith-bufferize -convert-vector-to-llvm="enable-amx" \ // RUN: -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate -mlir-to-llvmir | \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-mulf.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate -mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="+amx-tile,+amx-int8,+amx-bf16" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // 
RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-ext.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-ext.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-ext.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-ext.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate -mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="+amx-tile,+amx-int8,+amx-bf16" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-full.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-full.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-full.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli-full.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf \ // RUN: -arith-bufferize -convert-vector-to-llvm="enable-amx" \ // RUN: -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate -mlir-to-llvmir | \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-muli.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm 
-convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate -mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="+amx-tile,+amx-int8,+amx-bf16" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero-block.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero-block.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero-block.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero-block.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate -mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="+amx-tile,+amx-int8,+amx-bf16" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/test-tilezero.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm="enable-amx" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm="enable-amx" 
-convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate -mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="+amx-tile,+amx-int8,+amx-bf16" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-dot.mlir b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-dot.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-dot.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-dot.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm="enable-x86vector" -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm="enable-x86vector" -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate --mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="avx" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm='use-bare-ptr-memref-call-conv=1' -convert-arith-to-llvm -reconcile-unrealized-casts |\ +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm='use-bare-ptr-memref-call-conv=1' -convert-arith-to-llvm -reconcile-unrealized-casts |\ // 
RUN: mlir-translate --mlir-to-llvmir |\ // RUN: %lli --entry-function=entry --mattr="avx512f" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext |\ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-mask-compress.mlir b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-mask-compress.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-mask-compress.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-mask-compress.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm="enable-x86vector" -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm="enable-x86vector" -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate --mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="avx512bw" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-sparse-dot-product.mlir b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-sparse-dot-product.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-sparse-dot-product.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-sparse-dot-product.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm="enable-x86vector" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm="enable-x86vector" -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate --mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="avx512bw,avx512vp2intersect" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-vp2intersect-i32.mlir 
b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-vp2intersect-i32.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-vp2intersect-i32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-vp2intersect-i32.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm="enable-x86vector" -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm="enable-x86vector" -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-translate --mlir-to-llvmir | \ // RUN: %lli --entry-function=entry --mattr="avx512bw,avx512vp2intersect" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-0-d-vectors.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-0-d-vectors.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-0-d-vectors.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-0-d-vectors.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-broadcast.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-broadcast.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-broadcast.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-broadcast.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm 
-reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-compress.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-compress.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-compress.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-compress.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-constant-mask.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-constant-mask.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-constant-mask.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-constant-mask.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-contraction.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-contraction.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-contraction.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-contraction.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm 
-convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask-v4i1.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask-v4i1.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask-v4i1.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask-v4i1.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-create-mask.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-expand.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-expand.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-expand.mlir +++ 
b/mlir/test/Integration/Dialect/Vector/CPU/test-expand.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-extract-strided-slice.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-extract-strided-slice.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-extract-strided-slice.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-extract-strided-slice.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-col.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-col.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-col.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-col.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -lower-matrix-intrinsics -matrix-allow-contract -matrix-default-layout=column-major \ // RUN: 
-shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-row.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-row.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-row.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-flat-transpose-row.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -lower-matrix-intrinsics -matrix-allow-contract -matrix-default-layout=row-major \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-fma.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-fma.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-fma.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-fma.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-gather.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-gather.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-gather.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-gather.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf 
-convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-insert-strided-slice.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-insert-strided-slice.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-insert-strided-slice.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-insert-strided-slice.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-maskedload.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-maskedload.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-maskedload.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-maskedload.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-maskedstore.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-maskedstore.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-maskedstore.mlir +++ 
b/mlir/test/Integration/Dialect/Vector/CPU/test-maskedstore.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-col.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-col.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-col.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-col.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -lower-matrix-intrinsics -matrix-allow-contract -matrix-default-layout=column-major \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-row.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-row.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-row.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-matrix-multiply-row.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -lower-matrix-intrinsics 
-matrix-allow-contract -matrix-default-layout=row-major \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-f32.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-f32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-f32.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-i64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-i64.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-i64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-outerproduct-i64.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-print-int.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-print-int.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-print-int.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-print-int.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s 
-convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std \ +// RUN: mlir-opt %s -convert-scf-to-cf \ // RUN: -convert-vector-to-llvm='reassociate-fp-reductions' \ // RUN: -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std \ +// RUN: mlir-opt %s -convert-scf-to-cf \ 
// RUN: -convert-vector-to-llvm='reassociate-fp-reductions' \ // RUN: -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i32.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i32.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i4.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i4.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i4.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm 
-reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i64.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-i64.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-si4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-si4.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-si4.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-si4.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-ui4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-ui4.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-ui4.mlir +++ 
b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-ui4.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-scan.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-scan.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-scan.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-scan.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -test-vector-scan-lowering -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -test-vector-scan-lowering -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-scatter.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-scatter.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-scatter.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-scatter.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git 
a/mlir/test/Integration/Dialect/Vector/CPU/test-shape-cast.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-shape-cast.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-shape-cast.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-shape-cast.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-dot-matvec.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-dot-matvec.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-dot-matvec.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-dot-matvec.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry 
-entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-saxpy-jagged-matvec.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-saxpy-jagged-matvec.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-saxpy-jagged-matvec.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-sparse-saxpy-jagged-matvec.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: 
mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm 
-convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: 
mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e 
entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir @@ -1,9 +1,9 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e 
entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir @@ -1,9 +1,9 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-write.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-write.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-write.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-write.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std 
-convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transpose.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transpose.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transpose.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transpose.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-vector-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir @@ -1,11 +1,11 @@ // RUN: mlir-opt %s -test-vector-to-forloop -convert-vector-to-scf \ -// RUN: -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ +// RUN: -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \ // RUN: FileCheck %s // RUN: mlir-opt %s -convert-vector-to-scf -lower-affine \ -// 
RUN: -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e main \ +// RUN: -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e main \ // RUN: -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir --- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir +++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: -gpu-kernel-outlining \ // RUN: -pass-pipeline='gpu.module(strip-debuginfo,convert-gpu-to-nvvm,gpu-to-cubin{chip=sm_70})' \ -// RUN: --convert-scf-to-std -gpu-to-llvm \ +// RUN: --convert-scf-to-cf -gpu-to-llvm \ // RUN: | mlir-cpu-runner \ // RUN: --shared-libs=%linalg_test_lib_dir/libmlir_cuda_runtime%shlibext \ // RUN: --shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext \ diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir --- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir +++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: -gpu-kernel-outlining \ // RUN: -pass-pipeline='gpu.module(strip-debuginfo,convert-gpu-to-nvvm,gpu-to-cubin{chip=sm_70})' \ -// RUN: --convert-scf-to-std -gpu-to-llvm \ +// RUN: --convert-scf-to-cf -gpu-to-llvm \ // RUN: | mlir-cpu-runner \ // RUN: --shared-libs=%linalg_test_lib_dir/libmlir_cuda_runtime%shlibext \ // RUN: --shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext \ diff --git a/mlir/test/Integration/GPU/CUDA/shuffle.mlir b/mlir/test/Integration/GPU/CUDA/shuffle.mlir --- 
a/mlir/test/Integration/GPU/CUDA/shuffle.mlir +++ b/mlir/test/Integration/GPU/CUDA/shuffle.mlir @@ -24,10 +24,10 @@ %width = arith.index_cast %block_x : index to i32 %offset = arith.constant 4 : i32 %shfl, %valid = gpu.shuffle xor %val, %offset, %width : f32 - cond_br %valid, ^bb1(%shfl : f32), ^bb0 + cf.cond_br %valid, ^bb1(%shfl : f32), ^bb0 ^bb0: %m1 = arith.constant -1.0 : f32 - br ^bb1(%m1 : f32) + cf.br ^bb1(%m1 : f32) ^bb1(%value : f32): memref.store %value, %dst[%tx] : memref gpu.terminator diff --git a/mlir/test/Integration/GPU/ROCM/vecadd.mlir b/mlir/test/Integration/GPU/ROCM/vecadd.mlir --- a/mlir/test/Integration/GPU/ROCM/vecadd.mlir +++ b/mlir/test/Integration/GPU/ROCM/vecadd.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -gpu-kernel-outlining \ // RUN: -pass-pipeline='gpu.module(strip-debuginfo,convert-gpu-to-rocdl,gpu-to-hsaco{chip=%chip})' \ // RUN: -gpu-to-llvm \ diff --git a/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir b/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir --- a/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir +++ b/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -gpu-kernel-outlining \ // RUN: -pass-pipeline='gpu.module(strip-debuginfo,convert-gpu-to-rocdl,gpu-to-hsaco{chip=%chip})' \ // RUN: -gpu-to-llvm \ diff --git a/mlir/test/Target/Cpp/control_flow.mlir b/mlir/test/Target/Cpp/control_flow.mlir --- a/mlir/test/Target/Cpp/control_flow.mlir +++ b/mlir/test/Target/Cpp/control_flow.mlir @@ -4,14 +4,14 @@ // simple(10, false) -> 30 func @simple(i64, i1) -> i64 { ^bb0(%a: i64, %cond: i1): - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: - br ^bb3(%a: i64) + cf.br ^bb3(%a: i64) ^bb2: %b = emitc.call "add"(%a, %a) : (i64, i64) -> i64 - br ^bb3(%b: i64) + cf.br ^bb3(%b: i64) ^bb3(%c: i64): - br ^bb4(%c, %a : i64, i64) + cf.br 
^bb4(%c, %a : i64, i64) ^bb4(%d : i64, %e : i64): %0 = emitc.call "add"(%d, %e) : (i64, i64) -> i64 return %0 : i64 @@ -45,7 +45,7 @@ func @block_labels0() { ^bb1: - br ^bb2 + cf.br ^bb2 ^bb2: return } @@ -59,7 +59,7 @@ // Repeat the same function to make sure the names of the block labels get reset. func @block_labels1() { ^bb1: - br ^bb2 + cf.br ^bb2 ^bb2: return } diff --git a/mlir/test/Target/Cpp/invalid.mlir b/mlir/test/Target/Cpp/invalid.mlir --- a/mlir/test/Target/Cpp/invalid.mlir +++ b/mlir/test/Target/Cpp/invalid.mlir @@ -3,7 +3,7 @@ // expected-error@+1 {{'builtin.func' op with multiple blocks needs variables declared at top}} func @multiple_blocks() { ^bb1: - br ^bb2 + cf.br ^bb2 ^bb2: return } diff --git a/mlir/test/Transforms/buffer-hoisting.mlir b/mlir/test/Transforms/buffer-hoisting.mlir --- a/mlir/test/Transforms/buffer-hoisting.mlir +++ b/mlir/test/Transforms/buffer-hoisting.mlir @@ -14,20 +14,20 @@ // CHECK-LABEL: func @condBranch func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %[[ALLOC:.*]] = memref.alloc() -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // ----- @@ -47,19 +47,19 @@ %arg1: memref, %arg2: memref, %arg3: index) { - cond_br %arg0, ^bb1, ^bb2(%arg3: index) + cf.cond_br %arg0, ^bb1, ^bb2(%arg3: index) ^bb1: - br ^bb3(%arg1 : memref) + cf.br ^bb3(%arg1 : memref) ^bb2(%0: index): %1 = memref.alloc(%0) : memref test.buffer_based in(%arg1: memref) out(%1: memref) - br ^bb3(%1 : memref) + cf.br ^bb3(%1 : memref) ^bb3(%2: memref): test.copy(%2, %arg2) : (memref, memref) return } -// CHECK-NEXT: cond_br +// 
CHECK-NEXT: cf.cond_br // CHECK: ^bb2 // CHECK: ^bb2(%[[IDX:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc(%[[IDX]]) @@ -89,27 +89,27 @@ %arg1: memref, %arg2: memref, %arg3: index) { - cond_br %arg0, ^bb1, ^bb2(%arg3: index) + cf.cond_br %arg0, ^bb1, ^bb2(%arg3: index) ^bb1: - br ^bb6(%arg1 : memref) + cf.br ^bb6(%arg1 : memref) ^bb2(%0: index): %1 = memref.alloc(%0) : memref test.buffer_based in(%arg1: memref) out(%1: memref) - cond_br %arg0, ^bb3, ^bb4 + cf.cond_br %arg0, ^bb3, ^bb4 ^bb3: - br ^bb5(%1 : memref) + cf.br ^bb5(%1 : memref) ^bb4: - br ^bb5(%1 : memref) + cf.br ^bb5(%1 : memref) ^bb5(%2: memref): - br ^bb6(%2 : memref) + cf.br ^bb6(%2 : memref) ^bb6(%3: memref): - br ^bb7(%3 : memref) + cf.br ^bb7(%3 : memref) ^bb7(%4: memref): test.copy(%4, %arg2) : (memref, memref) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: ^bb2 // CHECK: ^bb2(%[[IDX:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc(%[[IDX]]) @@ -128,18 +128,18 @@ // CHECK-LABEL: func @criticalEdge func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) + cf.cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) ^bb1: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb2(%0 : memref<2xf32>) + cf.br ^bb2(%0 : memref<2xf32>) ^bb2(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %[[ALLOC:.*]] = memref.alloc() -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // ----- @@ -155,13 +155,13 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br 
^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) ^bb3(%5: memref<2xf32>, %6: memref<2xf32>): %7 = memref.alloc() : memref<2xf32> test.buffer_based in(%7: memref<2xf32>) out(%7: memref<2xf32>) @@ -171,8 +171,8 @@ // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc() // CHECK-NEXT: test.buffer_based -// CHECK: br ^bb3 -// CHECK: br ^bb3 +// CHECK: cf.br ^bb3 +// CHECK: cf.br ^bb3 // CHECK-NEXT: ^bb3 // CHECK: %[[ALLOC1:.*]] = memref.alloc() // CHECK-NEXT: test.buffer_based @@ -193,13 +193,13 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) ^bb3(%5: memref<2xf32>, %6: memref<2xf32>): test.copy(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>) return @@ -225,17 +225,17 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) + cf.cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) 
^bb3(%5: memref<2xf32>): - br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) ^bb4(%6: memref<2xf32>): - br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) ^bb5(%7: memref<2xf32>, %8: memref<2xf32>): %9 = memref.alloc() : memref<2xf32> test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>) @@ -245,9 +245,9 @@ // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc() // CHECK-NEXT: test.buffer_based -// CHECK: br ^bb5 -// CHECK: br ^bb5 -// CHECK: br ^bb5 +// CHECK: cf.br ^bb5 +// CHECK: cf.br ^bb5 +// CHECK: cf.br ^bb5 // CHECK-NEXT: ^bb5 // CHECK: %[[ALLOC1:.*]] = memref.alloc() // CHECK-NEXT: test.buffer_based @@ -287,15 +287,15 @@ %cond: i1, %arg0: memref<2xf32>, %arg1: memref<2xf32>) { - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>) - br ^exit(%0 : memref<2xf32>) + cf.br ^exit(%0 : memref<2xf32>) ^bb2: %1 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>) - br ^exit(%1 : memref<2xf32>) + cf.br ^exit(%1 : memref<2xf32>) ^exit(%arg2: memref<2xf32>): test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) return @@ -303,7 +303,7 @@ // CHECK-NEXT: %{{.*}} = memref.alloc() // CHECK-NEXT: %{{.*}} = memref.alloc() -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // ----- @@ -322,21 +322,21 @@ %cond: i1, %arg0: memref<2xf32>, %arg1: memref<2xf32>) { - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: - br ^exit(%arg0 : memref<2xf32>) + cf.br ^exit(%arg0 : memref<2xf32>) ^bb2: %1 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>) memref.dealloc %1 : memref<2xf32> - br ^exit(%1 : memref<2xf32>) + cf.br ^exit(%1 : memref<2xf32>) ^exit(%arg2: memref<2xf32>): test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %{{.*}} = 
memref.alloc() -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // ----- @@ -351,9 +351,9 @@ %arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) { @@ -363,13 +363,13 @@ %tmp1 = math.exp %gen1_arg0 : f32 test.region_yield %tmp1 : f32 } - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc() -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: test.region_buffer_based // CHECK: %[[ALLOC1:.*]] = memref.alloc() // CHECK-NEXT: test.buffer_based @@ -556,19 +556,19 @@ // CHECK-LABEL: func @condBranchAlloca func @condBranchAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloca() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: ^bb2 // CHECK: ^bb2 // CHECK-NEXT: %[[ALLOCA:.*]] = memref.alloca() @@ -586,17 +586,17 @@ %arg2: memref<2xf32>) { %0 = memref.alloca() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - cond_br 
%arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) + cf.cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) ^bb3(%5: memref<2xf32>): - br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) ^bb4(%6: memref<2xf32>): - br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) ^bb5(%7: memref<2xf32>, %8: memref<2xf32>): %9 = memref.alloc() : memref<2xf32> test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>) @@ -623,9 +623,9 @@ %arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) { @@ -635,13 +635,13 @@ %tmp1 = math.exp %gen1_arg0 : f32 test.region_yield %tmp1 : f32 } - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %[[ALLOC:.*]] = memref.alloc() -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: test.region_buffer_based // CHECK: %[[ALLOCA:.*]] = memref.alloca() // CHECK-NEXT: test.buffer_based diff --git a/mlir/test/Transforms/buffer-loop-hoisting.mlir b/mlir/test/Transforms/buffer-loop-hoisting.mlir --- a/mlir/test/Transforms/buffer-loop-hoisting.mlir +++ b/mlir/test/Transforms/buffer-loop-hoisting.mlir @@ -13,19 +13,19 @@ // CHECK-LABEL: func @condBranch func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : 
(memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: %[[ALLOC:.*]] = memref.alloc() // ----- @@ -46,19 +46,19 @@ %arg1: memref, %arg2: memref, %arg3: index) { - cond_br %arg0, ^bb1, ^bb2(%arg3: index) + cf.cond_br %arg0, ^bb1, ^bb2(%arg3: index) ^bb1: - br ^bb3(%arg1 : memref) + cf.br ^bb3(%arg1 : memref) ^bb2(%0: index): %1 = memref.alloc(%0) : memref test.buffer_based in(%arg1: memref) out(%1: memref) - br ^bb3(%1 : memref) + cf.br ^bb3(%1 : memref) ^bb3(%2: memref): test.copy(%2, %arg2) : (memref, memref) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: ^bb2 // CHECK: ^bb2(%[[IDX:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc(%[[IDX]]) @@ -77,9 +77,9 @@ %arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) { @@ -89,12 +89,12 @@ %tmp1 = math.exp %gen1_arg0 : f32 test.region_yield %tmp1 : f32 } - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: %[[ALLOC0:.*]] = memref.alloc() // CHECK: test.region_buffer_based // CHECK: %[[ALLOC1:.*]] = memref.alloc() diff --git a/mlir/test/Transforms/canonicalize-block-merge.mlir b/mlir/test/Transforms/canonicalize-block-merge.mlir --- a/mlir/test/Transforms/canonicalize-block-merge.mlir +++ b/mlir/test/Transforms/canonicalize-block-merge.mlir @@ -58,7 +58,7 @@ // CHECK: %[[RES:.*]] = arith.select %[[COND]], %[[ARG0]], %[[ARG1]] // CHECK: return %[[RES]] - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: return %arg0 : i32 @@ -75,7 +75,7 @@ // CHECK: %[[RES1:.*]] = arith.select %[[COND]], %[[ARG0]], %[[ARG1]] // CHECK: return 
%[[RES1]], %[[RES0]] - cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32) + cf.cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32) ^bb1(%arg2 : i32): return %arg0, %arg2 : i32, i32 @@ -87,9 +87,9 @@ // CHECK-LABEL: func @mismatch_argument_uses( func @mismatch_argument_uses(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32, i32) { - // CHECK: cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2 - cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32) + cf.cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32) ^bb1(%arg2 : i32): return %arg0, %arg2 : i32, i32 @@ -101,9 +101,9 @@ // CHECK-LABEL: func @mismatch_argument_types( func @mismatch_argument_types(%cond : i1, %arg0 : i32, %arg1 : i16) { - // CHECK: cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2 - cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg1 : i16) + cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg1 : i16) ^bb1(%arg2 : i32): "foo.return"(%arg2) : (i32) -> () @@ -115,9 +115,9 @@ // CHECK-LABEL: func @mismatch_argument_count( func @mismatch_argument_count(%cond : i1, %arg0 : i32) { - // CHECK: cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2 - cond_br %cond, ^bb1(%arg0 : i32), ^bb2 + cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2 ^bb1(%arg2 : i32): "foo.return"(%arg2) : (i32) -> () @@ -129,9 +129,9 @@ // CHECK-LABEL: func @mismatch_operations( func @mismatch_operations(%cond : i1) { - // CHECK: cond_br %{{.*}}, ^bb1, ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2 - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: "foo.return"() : () -> () @@ -143,9 +143,9 @@ // CHECK-LABEL: func @mismatch_operation_count( func @mismatch_operation_count(%cond : i1) { - // CHECK: cond_br %{{.*}}, ^bb1, ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2 - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: "foo.op"() : () -> () @@ -158,9 +158,9 @@ // CHECK-LABEL: func @contains_regions( func 
@contains_regions(%cond : i1) { - // CHECK: cond_br %{{.*}}, ^bb1, ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2 - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: scf.if %cond { @@ -180,19 +180,19 @@ // CHECK-SAME: %[[ARG:.*]]: i1, %[[ARG2:.*]]: i1 func @mismatch_loop(%cond : i1, %cond2 : i1) { // CHECK-NEXT: %[[LOOP_CARRY:.*]] = "foo.op" - // CHECK: cond_br %{{.*}}, ^bb1(%[[ARG2]] : i1), ^bb2 + // CHECK: cf.cond_br %{{.*}}, ^bb1(%[[ARG2]] : i1), ^bb2 %cond3 = "foo.op"() : () -> (i1) - cond_br %cond, ^bb2, ^bb3 + cf.cond_br %cond, ^bb2, ^bb3 ^bb1: // CHECK: ^bb1(%[[ARG3:.*]]: i1): - // CHECK-NEXT: cond_br %[[ARG3]], ^bb1(%[[LOOP_CARRY]] : i1), ^bb2 + // CHECK-NEXT: cf.cond_br %[[ARG3]], ^bb1(%[[LOOP_CARRY]] : i1), ^bb2 - cond_br %cond3, ^bb1, ^bb3 + cf.cond_br %cond3, ^bb1, ^bb3 ^bb2: - cond_br %cond2, ^bb1, ^bb3 + cf.cond_br %cond2, ^bb1, ^bb3 ^bb3: // CHECK: ^bb2: @@ -207,20 +207,20 @@ func @mismatch_operand_types(%arg0 : i1, %arg1 : memref, %arg2 : memref) { %c0_i32 = arith.constant 0 : i32 %true = arith.constant true - br ^bb1 + cf.br ^bb1 ^bb1: - cond_br %arg0, ^bb2, ^bb3 + cf.cond_br %arg0, ^bb2, ^bb3 ^bb2: // CHECK: memref.store %{{.*}}, %{{.*}} : memref memref.store %c0_i32, %arg1[] : memref - br ^bb1 + cf.br ^bb1 ^bb3: // CHECK: memref.store %{{.*}}, %{{.*}} : memref memref.store %true, %arg2[] : memref - br ^bb1 + cf.br ^bb1 } // Check that it is illegal to merge blocks containing an operand @@ -232,21 +232,21 @@ func @nomerge(%arg0: i32, %i: i32) { %c1_i32 = arith.constant 1 : i32 %icmp = arith.cmpi slt, %i, %arg0 : i32 - cond_br %icmp, ^bb2, ^bb3 + cf.cond_br %icmp, ^bb2, ^bb3 ^bb2: // pred: ^bb1 %ip1 = arith.addi %i, %c1_i32 : i32 - br ^bb4(%ip1 : i32) + cf.br ^bb4(%ip1 : i32) ^bb7: // pred: ^bb5 %jp1 = arith.addi %j, %c1_i32 : i32 - br ^bb4(%jp1 : i32) + cf.br ^bb4(%jp1 : i32) ^bb4(%j: i32): // 2 preds: ^bb2, ^bb7 %jcmp = arith.cmpi slt, %j, %arg0 : i32 // CHECK-NOT: call @print(%[[arg1:.+]], %[[arg1]]) call @print(%j, %ip1) : (i32, 
i32) -> () - cond_br %jcmp, ^bb7, ^bb3 + cf.cond_br %jcmp, ^bb7, ^bb3 ^bb3: // pred: ^bb1 return diff --git a/mlir/test/Transforms/canonicalize-dce.mlir b/mlir/test/Transforms/canonicalize-dce.mlir --- a/mlir/test/Transforms/canonicalize-dce.mlir +++ b/mlir/test/Transforms/canonicalize-dce.mlir @@ -30,15 +30,15 @@ // Test case: Deleting recursively dead block arguments. // CHECK: func @f(%arg0: f32) -// CHECK-NEXT: br ^bb1 +// CHECK-NEXT: cf.br ^bb1 // CHECK-NEXT: ^bb1: -// CHECK-NEXT: br ^bb1 +// CHECK-NEXT: cf.br ^bb1 func @f(%arg0: f32) { - br ^loop(%arg0: f32) + cf.br ^loop(%arg0: f32) ^loop(%loop: f32): - br ^loop(%loop: f32) + cf.br ^loop(%loop: f32) } // ----- @@ -46,27 +46,27 @@ // Test case: Deleting recursively dead block arguments with pure ops in between. // CHECK: func @f(%arg0: f32) -// CHECK-NEXT: br ^bb1 +// CHECK-NEXT: cf.br ^bb1 // CHECK-NEXT: ^bb1: -// CHECK-NEXT: br ^bb1 +// CHECK-NEXT: cf.br ^bb1 func @f(%arg0: f32) { - br ^loop(%arg0: f32) + cf.br ^loop(%arg0: f32) ^loop(%0: f32): %1 = "math.exp"(%0) : (f32) -> f32 - br ^loop(%1: f32) + cf.br ^loop(%1: f32) } // ----- -// Test case: Delete block arguments for cond_br. +// Test case: Delete block arguments for cf.cond_br. 
// CHECK: func @f(%arg0: f32, %arg1: i1) // CHECK-NEXT: return func @f(%arg0: f32, %pred: i1) { %exp = "math.exp"(%arg0) : (f32) -> f32 - cond_br %pred, ^true(%exp: f32), ^false(%exp: f32) + cf.cond_br %pred, ^true(%exp: f32), ^false(%exp: f32) ^true(%0: f32): return ^false(%1: f32): diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir --- a/mlir/test/Transforms/canonicalize.mlir +++ b/mlir/test/Transforms/canonicalize.mlir @@ -389,7 +389,7 @@ func @dead_dealloc_fold_multi_use(%cond : i1) { // CHECK-NEXT: return %a = memref.alloc() : memref<4xf32> - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: memref.dealloc %a: memref<4xf32> diff --git a/mlir/test/Transforms/control-flow-sink.mlir b/mlir/test/Transforms/control-flow-sink.mlir --- a/mlir/test/Transforms/control-flow-sink.mlir +++ b/mlir/test/Transforms/control-flow-sink.mlir @@ -117,7 +117,7 @@ // CHECK-NEXT: %[[V1:.*]] = "test.any_cond"() ({ // CHECK-NEXT: %[[V3:.*]] = arith.addi %[[V0]], %[[ARG2]] // CHECK-NEXT: %[[V4:.*]] = arith.addi %[[V3]], %[[ARG1]] -// CHECK-NEXT: br ^bb1(%[[V4]] : i32) +// CHECK-NEXT: cf.br ^bb1(%[[V4]] : i32) // CHECK-NEXT: ^bb1(%[[V5:.*]]: i32): // CHECK-NEXT: %[[V6:.*]] = arith.addi %[[V5]], %[[V4]] // CHECK-NEXT: "test.yield"(%[[V6]]) @@ -129,7 +129,7 @@ %1 = arith.addi %0, %arg2 : i32 %2 = arith.addi %1, %arg1 : i32 %3 = "test.any_cond"() ({ - br ^bb1(%2 : i32) + cf.br ^bb1(%2 : i32) ^bb1(%5: i32): %6 = arith.addi %5, %2 : i32 "test.yield"(%6) : (i32) -> () @@ -184,7 +184,7 @@ // CHECK-SAME: (%[[ARG0:.*]]: i32) -> i32 { // CHECK-NEXT: %[[V0:.*]] = "test.any_cond"() ({ // CHECK-NEXT: %[[V1:.*]] = arith.addi %[[ARG0]], %[[ARG0]] -// CHECK-NEXT: br ^bb1 +// CHECK-NEXT: cf.br ^bb1 // CHECK-NEXT: ^bb1: // CHECK-NEXT: "test.yield"(%[[V1]]) : (i32) -> () // CHECK-NEXT: }) @@ -192,7 +192,7 @@ func @test_not_sunk_deeply(%arg0: i32) -> i32 { %0 = arith.addi %arg0, %arg0 : i32 %1 = "test.any_cond"() ({ - br ^bb1 + cf.br ^bb1 ^bb1: 
"test.yield"(%0) : (i32) -> () }) : () -> i32 diff --git a/mlir/test/Transforms/cse.mlir b/mlir/test/Transforms/cse.mlir --- a/mlir/test/Transforms/cse.mlir +++ b/mlir/test/Transforms/cse.mlir @@ -130,13 +130,13 @@ // CHECK-NEXT: %true = arith.constant true %cond = arith.constant true - // CHECK-NEXT: cond_br %true, ^bb1, ^bb2(%c1_i32 : i32) - cond_br %cond, ^bb1, ^bb2(%0 : i32) + // CHECK-NEXT: cf.cond_br %true, ^bb1, ^bb2(%c1_i32 : i32) + cf.cond_br %cond, ^bb1, ^bb2(%0 : i32) ^bb1: // CHECK: ^bb1: - // CHECK-NEXT: br ^bb2(%c1_i32 : i32) + // CHECK-NEXT: cf.br ^bb2(%c1_i32 : i32) %1 = arith.constant 1 : i32 - br ^bb2(%1 : i32) + cf.br ^bb2(%1 : i32) ^bb2(%arg : i32): return %arg : i32 @@ -167,15 +167,15 @@ // CHECK-NEXT: %true = arith.constant true %cond = arith.constant true - // CHECK-NEXT: cond_br %true, ^bb1, ^bb2(%c0_i32 : i32) - cond_br %cond, ^bb1, ^bb2(%0 : i32) + // CHECK-NEXT: cf.cond_br %true, ^bb1, ^bb2(%c0_i32 : i32) + cf.cond_br %cond, ^bb1, ^bb2(%0 : i32) ^bb1: // CHECK: ^bb1: // CHECK-NEXT: %c1_i32 = arith.constant 1 : i32 %1 = arith.constant 1 : i32 - // CHECK-NEXT: br ^bb2(%c1_i32 : i32) - br ^bb2(%1 : i32) + // CHECK-NEXT: cf.br ^bb2(%c1_i32 : i32) + cf.br ^bb2(%1 : i32) ^bb2(%arg : i32): // CHECK: ^bb2 // CHECK-NEXT: %c1_i32_0 = arith.constant 1 : i32 @@ -196,18 +196,18 @@ %0 = "foo.region"() ({ // CHECK-NEXT: %c0_i32 = arith.constant 0 : i32 // CHECK-NEXT: %true = arith.constant true - // CHECK-NEXT: cond_br + // CHECK-NEXT: cf.cond_br %1 = arith.constant 0 : i32 %true = arith.constant true - cond_br %true, ^bb1, ^bb2(%1 : i32) + cf.cond_br %true, ^bb1, ^bb2(%1 : i32) ^bb1: // CHECK: ^bb1: // CHECK-NEXT: %c1_i32 = arith.constant 1 : i32 - // CHECK-NEXT: br + // CHECK-NEXT: cf.br %c1_i32 = arith.constant 1 : i32 - br ^bb2(%c1_i32 : i32) + cf.br ^bb2(%c1_i32 : i32) ^bb2(%arg : i32): // CHECK: ^bb2(%1: i32): // CHECK-NEXT: %c1_i32_0 = arith.constant 1 : i32 diff --git a/mlir/test/Transforms/inlining.mlir b/mlir/test/Transforms/inlining.mlir --- 
a/mlir/test/Transforms/inlining.mlir +++ b/mlir/test/Transforms/inlining.mlir @@ -20,7 +20,7 @@ // Inline a function that has multiple return operations. func @func_with_multi_return(%a : i1) -> (i32) { - cond_br %a, ^bb1, ^bb2 + cf.cond_br %a, ^bb1, ^bb2 ^bb1: %const_0 = arith.constant 0 : i32 @@ -34,13 +34,13 @@ // CHECK-LABEL: func @inline_with_multi_return() -> i32 func @inline_with_multi_return() -> i32 { // CHECK-NEXT: [[VAL_7:%.*]] = arith.constant false -// CHECK-NEXT: cond_br [[VAL_7]], ^bb1, ^bb2 +// CHECK-NEXT: cf.cond_br [[VAL_7]], ^bb1, ^bb2 // CHECK: ^bb1: // CHECK-NEXT: [[VAL_8:%.*]] = arith.constant 0 : i32 -// CHECK-NEXT: br ^bb3([[VAL_8]] : i32) +// CHECK-NEXT: cf.br ^bb3([[VAL_8]] : i32) // CHECK: ^bb2: // CHECK-NEXT: [[VAL_9:%.*]] = arith.constant 55 : i32 -// CHECK-NEXT: br ^bb3([[VAL_9]] : i32) +// CHECK-NEXT: cf.br ^bb3([[VAL_9]] : i32) // CHECK: ^bb3([[VAL_10:%.*]]: i32): // CHECK-NEXT: return [[VAL_10]] : i32 @@ -133,7 +133,7 @@ } func @convert_callee_fn_multiblock() -> i32 { - br ^bb0 + cf.br ^bb0 ^bb0: %0 = arith.constant 0 : i32 return %0 : i32 @@ -141,10 +141,10 @@ // CHECK-LABEL: func @inline_convert_result_multiblock func @inline_convert_result_multiblock() -> i16 { -// CHECK: br ^bb1 {inlined_conversion} +// CHECK: cf.br ^bb1 {inlined_conversion} // CHECK: ^bb1: // CHECK: %[[C:.+]] = arith.constant {inlined_conversion} 0 : i32 -// CHECK: br ^bb2(%[[C]] : i32) +// CHECK: cf.br ^bb2(%[[C]] : i32) // CHECK: ^bb2(%[[BBARG:.+]]: i32): // CHECK: %[[CAST_RESULT:.+]] = "test.cast"(%[[BBARG]]) : (i32) -> i16 // CHECK: return %[[CAST_RESULT]] : i16 @@ -206,14 +206,14 @@ // Test block arguments location propagation. // Use two call-sites to force cloning. 
func @func_with_block_args_location(%arg0 : i32) { - br ^bb1(%arg0 : i32) + cf.br ^bb1(%arg0 : i32) ^bb1(%x : i32 loc("foo")): "test.foo" (%x) : (i32) -> () loc("bar") return } // INLINE-LOC-LABEL: func @func_with_block_args_location_callee1 -// INLINE-LOC: br +// INLINE-LOC: cf.br // INLINE-LOC: ^bb{{[0-9]+}}(%{{.*}}: i32 loc("foo") func @func_with_block_args_location_callee1(%arg0 : i32) { call @func_with_block_args_location(%arg0) : (i32) -> () diff --git a/mlir/test/Transforms/normalize-memrefs.mlir b/mlir/test/Transforms/normalize-memrefs.mlir --- a/mlir/test/Transforms/normalize-memrefs.mlir +++ b/mlir/test/Transforms/normalize-memrefs.mlir @@ -206,7 +206,7 @@ %a = affine.load %A[0] : memref<16xf64, #tile> %p = arith.mulf %a, %a : f64 %cond = arith.constant 1 : i1 - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: %res1, %res2 = call @ret_single_argument_type(%C) : (memref<8xf64, #tile>) -> (memref<16xf64, #tile>, memref<8xf64, #tile>) return %res2, %p: memref<8xf64, #tile>, f64 @@ -217,7 +217,7 @@ // CHECK: %[[a:[0-9]+]] = affine.load %[[A]][0, 0] : memref<4x4xf64> // CHECK: %[[p:[0-9]+]] = arith.mulf %[[a]], %[[a]] : f64 // CHECK: %true = arith.constant true -// CHECK: cond_br %true, ^bb1, ^bb2 +// CHECK: cf.cond_br %true, ^bb1, ^bb2 // CHECK: ^bb1: // pred: ^bb0 // CHECK: %[[res:[0-9]+]]:2 = call @ret_single_argument_type(%[[C]]) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>) // CHECK: return %[[res]]#1, %[[p]] : memref<2x4xf64>, f64 diff --git a/mlir/test/Transforms/promote-buffers-to-stack.mlir b/mlir/test/Transforms/promote-buffers-to-stack.mlir --- a/mlir/test/Transforms/promote-buffers-to-stack.mlir +++ b/mlir/test/Transforms/promote-buffers-to-stack.mlir @@ -16,19 +16,19 @@ // CHECK-LABEL: func @condBranch func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = 
memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br {{.*}} +// CHECK-NEXT: cf.cond_br {{.*}} // CHECK: ^bb2 // CHECK-NEXT: %[[ALLOCA:.*]] = memref.alloca() // CHECK: test.copy @@ -51,24 +51,24 @@ %arg1: memref, %arg2: memref, %arg3: index) { - cond_br %arg0, ^bb1, ^bb2(%arg3: index) + cf.cond_br %arg0, ^bb1, ^bb2(%arg3: index) ^bb1: - br ^bb3(%arg1 : memref) + cf.br ^bb3(%arg1 : memref) ^bb2(%0: index): %1 = memref.alloc(%0) : memref test.buffer_based in(%arg1: memref) out(%1: memref) - br ^bb3(%1 : memref) + cf.br ^bb3(%1 : memref) ^bb3(%2: memref): test.copy(%2, %arg2) : (memref, memref) return } -// CHECK-NEXT: cond_br +// CHECK-NEXT: cf.cond_br // CHECK: ^bb2 // CHECK: ^bb2(%[[IDX:.*]]:{{.*}}) // CHECK-NEXT: %[[ALLOC0:.*]] = memref.alloc(%[[IDX]]) // CHECK-NEXT: test.buffer_based -// CHECK: br ^bb3 +// CHECK: cf.br ^bb3 // CHECK-NEXT: ^bb3(%[[ALLOC0:.*]]:{{.*}}) // CHECK: test.copy(%[[ALLOC0]], // CHECK-NEXT: return @@ -135,17 +135,17 @@ // CHECK-LABEL: func @criticalEdge func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) + cf.cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) ^bb1: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - br ^bb2(%0 : memref<2xf32>) + cf.br ^bb2(%0 : memref<2xf32>) ^bb2(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br {{.*}} +// CHECK-NEXT: cf.cond_br {{.*}} // CHECK: ^bb1 // CHECK-NEXT: %[[ALLOCA:.*]] = memref.alloca() // CHECK: test.copy @@ -165,16 +165,16 @@ func @invCriticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: 
memref<2xf32>) - cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) + cf.cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>) ^bb1: - br ^bb2(%0 : memref<2xf32>) + cf.br ^bb2(%0 : memref<2xf32>) ^bb2(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } // CHECK-NEXT: %[[ALLOCA:.*]] = memref.alloca() -// CHECK: cond_br +// CHECK: cf.cond_br // CHECK: test.copy // CHECK-NEXT: return @@ -192,13 +192,13 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) ^bb3(%5: memref<2xf32>, %6: memref<2xf32>): %7 = memref.alloc() : memref<2xf32> test.buffer_based in(%5: memref<2xf32>) out(%7: memref<2xf32>) @@ -227,13 +227,13 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) + cf.br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>) ^bb3(%5: memref<2xf32>, %6: memref<2xf32>): test.copy(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>) return @@ -260,17 +260,17 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { %0 
= memref.alloc() : memref<2xf32> test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) - cond_br %arg0, + cf.cond_br %arg0, ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>), ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>) ^bb1(%1: memref<2xf32>, %2: memref<2xf32>): - br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%1, %2 : memref<2xf32>, memref<2xf32>) ^bb2(%3: memref<2xf32>, %4: memref<2xf32>): - cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) + cf.cond_br %arg0, ^bb3(%3 : memref<2xf32>), ^bb4(%4 : memref<2xf32>) ^bb3(%5: memref<2xf32>): - br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%5, %3 : memref<2xf32>, memref<2xf32>) ^bb4(%6: memref<2xf32>): - br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) + cf.br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>) ^bb5(%7: memref<2xf32>, %8: memref<2xf32>): %9 = memref.alloc() : memref<2xf32> test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>) @@ -323,21 +323,21 @@ %cond: i1, %arg0: memref<2xf32>, %arg1: memref<2xf32>) { - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: %0 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>) - br ^exit(%0 : memref<2xf32>) + cf.br ^exit(%0 : memref<2xf32>) ^bb2: %1 = memref.alloc() : memref<2xf32> test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>) - br ^exit(%1 : memref<2xf32>) + cf.br ^exit(%1 : memref<2xf32>) ^exit(%arg2: memref<2xf32>): test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br {{.*}} +// CHECK-NEXT: cf.cond_br {{.*}} // CHECK: ^bb1 // CHECK-NEXT: %{{.*}} = memref.alloca() // CHECK: ^bb2 @@ -357,9 +357,9 @@ %arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) { @@ 
-369,13 +369,13 @@ %tmp1 = math.exp %gen1_arg0 : f32 test.region_yield %tmp1 : f32 } - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>) return } -// CHECK-NEXT: cond_br {{.*}} +// CHECK-NEXT: cf.cond_br {{.*}} // CHECK: ^bb2 // CHECK-NEXT: %[[ALLOCA0:.*]] = memref.alloca() // CHECK: ^bb0 diff --git a/mlir/test/Transforms/sccp-callgraph.mlir b/mlir/test/Transforms/sccp-callgraph.mlir --- a/mlir/test/Transforms/sccp-callgraph.mlir +++ b/mlir/test/Transforms/sccp-callgraph.mlir @@ -185,11 +185,11 @@ func private @complex_inner_if(%arg0 : i32) -> i32 { // CHECK-DAG: %[[TRUE:.*]] = arith.constant true // CHECK-DAG: %[[CST:.*]] = arith.constant 1 : i32 - // CHECK: cond_br %[[TRUE]], ^bb1 + // CHECK: cf.cond_br %[[TRUE]], ^bb1 %cst_20 = arith.constant 20 : i32 %cond = arith.cmpi ult, %arg0, %cst_20 : i32 - cond_br %cond, ^bb1, ^bb2 + cf.cond_br %cond, ^bb1, ^bb2 ^bb1: // CHECK: ^bb1: @@ -211,7 +211,7 @@ // CHECK: %[[CST:.*]] = arith.constant 1 : i32 %loop_cond = call @complex_cond() : () -> i1 - cond_br %loop_cond, ^bb1, ^bb2 + cf.cond_br %loop_cond, ^bb1, ^bb2 ^bb1: // CHECK: ^bb1: diff --git a/mlir/test/Transforms/sccp.mlir b/mlir/test/Transforms/sccp.mlir --- a/mlir/test/Transforms/sccp.mlir +++ b/mlir/test/Transforms/sccp.mlir @@ -22,10 +22,10 @@ %cond = arith.constant true %1 = arith.constant 1 : i32 - cond_br %cond, ^bb1, ^bb2(%arg0 : i32) + cf.cond_br %cond, ^bb1, ^bb2(%arg0 : i32) ^bb1: - br ^bb2(%1 : i32) + cf.br ^bb2(%1 : i32) ^bb2(%arg : i32): // CHECK: ^bb2(%{{.*}}: i32): @@ -40,10 +40,10 @@ // CHECK-LABEL: func @simple_control_flow_overdefined func @simple_control_flow_overdefined(%arg0 : i32, %arg1 : i1) -> i32 { %1 = arith.constant 1 : i32 - cond_br %arg1, ^bb1, ^bb2(%arg0 : i32) + cf.cond_br %arg1, ^bb1, ^bb2(%arg0 : i32) ^bb1: - br ^bb2(%1 : i32) + cf.br ^bb2(%1 : i32) ^bb2(%arg : i32): // CHECK: ^bb2(%[[ARG:.*]]: i32): @@ -59,10 +59,10 @@ func 
@simple_control_flow_constant_overdefined(%arg0 : i32, %arg1 : i1) -> i32 { %1 = arith.constant 1 : i32 %2 = arith.constant 2 : i32 - cond_br %arg1, ^bb1, ^bb2(%arg0 : i32) + cf.cond_br %arg1, ^bb1, ^bb2(%arg0 : i32) ^bb1: - br ^bb2(%2 : i32) + cf.br ^bb2(%2 : i32) ^bb2(%arg : i32): // CHECK: ^bb2(%[[ARG:.*]]: i32): @@ -79,7 +79,7 @@ "foo.cond_br"() [^bb1, ^bb2] : () -> () ^bb1: - br ^bb2(%1 : i32) + cf.br ^bb2(%1 : i32) ^bb2(%arg : i32): // CHECK: ^bb2(%[[ARG:.*]]: i32): @@ -97,17 +97,17 @@ // CHECK: %[[CST:.*]] = arith.constant 1 : i32 %cst_1 = arith.constant 1 : i32 - cond_br %cond1, ^bb1(%cst_1 : i32), ^bb2(%cst_1 : i32) + cf.cond_br %cond1, ^bb1(%cst_1 : i32), ^bb2(%cst_1 : i32) ^bb1(%iv: i32): // CHECK: ^bb1(%{{.*}}: i32): // CHECK-NEXT: %[[COND:.*]] = call @ext_cond_fn() - // CHECK-NEXT: cond_br %[[COND]], ^bb1(%[[CST]] : i32), ^bb2(%[[CST]] : i32) + // CHECK-NEXT: cf.cond_br %[[COND]], ^bb1(%[[CST]] : i32), ^bb2(%[[CST]] : i32) %cst_0 = arith.constant 0 : i32 %res = arith.addi %iv, %cst_0 : i32 %cond2 = call @ext_cond_fn() : () -> i1 - cond_br %cond2, ^bb1(%res : i32), ^bb2(%res : i32) + cf.cond_br %cond2, ^bb1(%res : i32), ^bb2(%res : i32) ^bb2(%arg : i32): // CHECK: ^bb2(%{{.*}}: i32): @@ -126,30 +126,30 @@ // CHECK-DAG: %[[TRUE:.*]] = arith.constant true %cst_1 = arith.constant 1 : i32 - br ^bb1(%cst_1 : i32) + cf.br ^bb1(%cst_1 : i32) ^bb1(%iv: i32): %cond2 = call @ext_cond_fn() : () -> i1 - cond_br %cond2, ^bb5(%iv : i32), ^bb2 + cf.cond_br %cond2, ^bb5(%iv : i32), ^bb2 ^bb2: // CHECK: ^bb2: - // CHECK: cond_br %[[TRUE]], ^bb3, ^bb4 + // CHECK: cf.cond_br %[[TRUE]], ^bb3, ^bb4 %cst_20 = arith.constant 20 : i32 %cond = arith.cmpi ult, %iv, %cst_20 : i32 - cond_br %cond, ^bb3, ^bb4 + cf.cond_br %cond, ^bb3, ^bb4 ^bb3: // CHECK: ^bb3: - // CHECK: br ^bb1(%[[CST]] : i32) + // CHECK: cf.br ^bb1(%[[CST]] : i32) %cst_1_2 = arith.constant 1 : i32 - br ^bb1(%cst_1_2 : i32) + cf.br ^bb1(%cst_1_2 : i32) ^bb4: %iv_inc = arith.addi %iv, %cst_1 : i32 - br 
^bb1(%iv_inc : i32) + cf.br ^bb1(%iv_inc : i32) ^bb5(%result: i32): // CHECK: ^bb5(%{{.*}}: i32): @@ -166,11 +166,11 @@ // CHECK-LABEL: func @simple_loop_overdefined func @simple_loop_overdefined(%arg0 : i32, %cond1 : i1) -> i32 { %cst_1 = arith.constant 1 : i32 - cond_br %cond1, ^bb1(%cst_1 : i32), ^bb2(%cst_1 : i32) + cf.cond_br %cond1, ^bb1(%cst_1 : i32), ^bb2(%cst_1 : i32) ^bb1(%iv: i32): %cond2, %res = call @ext_cond_and_value_fn() : () -> (i1, i32) - cond_br %cond2, ^bb1(%res : i32), ^bb2(%res : i32) + cf.cond_br %cond2, ^bb1(%res : i32), ^bb2(%res : i32) ^bb2(%arg : i32): // CHECK: ^bb2(%[[ARG:.*]]: i32): @@ -185,13 +185,13 @@ func @recheck_executable_edge(%cond0: i1) -> (i1, i1) { %true = arith.constant true %false = arith.constant false - cond_br %cond0, ^bb_1a, ^bb2(%false : i1) + cf.cond_br %cond0, ^bb_1a, ^bb2(%false : i1) ^bb_1a: - br ^bb2(%true : i1) + cf.br ^bb2(%true : i1) ^bb2(%x: i1): // CHECK: ^bb2(%[[X:.*]]: i1): - br ^bb3(%x : i1) + cf.br ^bb3(%x : i1) ^bb3(%y: i1): // CHECK: ^bb3(%[[Y:.*]]: i1): diff --git a/mlir/test/Transforms/test-legalizer-full.mlir b/mlir/test/Transforms/test-legalizer-full.mlir --- a/mlir/test/Transforms/test-legalizer-full.mlir +++ b/mlir/test/Transforms/test-legalizer-full.mlir @@ -89,8 +89,8 @@ func @test_undo_region_inline() { "test.region"() ({ ^bb1(%i0: i64): - // expected-error@+1 {{failed to legalize operation 'std.br'}} - br ^bb2(%i0 : i64) + // expected-error@+1 {{failed to legalize operation 'cf.br'}} + cf.br ^bb2(%i0 : i64) ^bb2(%i1: i64): "test.invalid"(%i1) : (i64) -> () }) {} : () -> () @@ -110,7 +110,7 @@ // expected-error@+1 {{failed to legalize operation 'test.region'}} "test.region"() ({ ^bb1(%i0: i64): - br ^bb2(%i0 : i64) + cf.br ^bb2(%i0 : i64) ^bb2(%i1: i64): "test.invalid"(%i1) : (i64) -> () }) {legalizer.should_clone, legalizer.erase_old_blocks} : () -> () diff --git a/mlir/test/mlir-cpu-runner/async-error.mlir b/mlir/test/mlir-cpu-runner/async-error.mlir --- 
a/mlir/test/mlir-cpu-runner/async-error.mlir +++ b/mlir/test/mlir-cpu-runner/async-error.mlir @@ -3,7 +3,7 @@ // RUN: -async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ // RUN: -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-linalg-to-llvm \ // RUN: -convert-vector-to-llvm \ // RUN: -convert-arith-to-llvm \ @@ -35,7 +35,7 @@ // Check that assertion in the async region converted to async error. // ------------------------------------------------------------------------ // %token1 = async.execute { - assert %false, "error" + cf.assert %false, "error" async.yield } async.runtime.await %token1 : !async.token @@ -49,7 +49,7 @@ // ------------------------------------------------------------------------ // %token2 = async.execute { %token = async.execute { - assert %false, "error" + cf.assert %false, "error" async.yield } async.await %token : !async.token @@ -66,7 +66,7 @@ // ------------------------------------------------------------------------ // %token3, %value3 = async.execute -> !async.value { %token, %value = async.execute -> !async.value { - assert %false, "error" + cf.assert %false, "error" %0 = arith.constant 123.45 : f32 async.yield %0 : f32 } @@ -95,7 +95,7 @@ } %token5 = async.execute { - assert %false, "error" + cf.assert %false, "error" async.yield } diff --git a/mlir/test/mlir-cpu-runner/async.mlir b/mlir/test/mlir-cpu-runner/async.mlir --- a/mlir/test/mlir-cpu-runner/async.mlir +++ b/mlir/test/mlir-cpu-runner/async.mlir @@ -3,7 +3,7 @@ // RUN: -async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ // RUN: -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-linalg-to-llvm \ // RUN: -convert-memref-to-llvm \ // RUN: -convert-arith-to-llvm \ diff --git a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir --- a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir +++ 
b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-arith-to-llvm -convert-memref-to-llvm -convert-std-to-llvm='use-bare-ptr-memref-call-conv=1' -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s +// RUN: mlir-opt %s -convert-scf-to-cf -convert-arith-to-llvm -convert-memref-to-llvm -convert-std-to-llvm='use-bare-ptr-memref-call-conv=1' -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s // Verify bare pointer memref calling convention. `simple_add1_add2_test` // gets two 2xf32 memrefs, adds 1.0f to the first one and 2.0f to the second diff --git a/mlir/test/mlir-cpu-runner/copy.mlir b/mlir/test/mlir-cpu-runner/copy.mlir --- a/mlir/test/mlir-cpu-runner/copy.mlir +++ b/mlir/test/mlir-cpu-runner/copy.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-arith-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-arith-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir --- a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir +++ b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -convert-memref-to-llvm -convert-arith-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts \ +// RUN: mlir-opt %s -convert-scf-to-cf -convert-memref-to-llvm -convert-arith-to-llvm -convert-std-to-llvm 
-reconcile-unrealized-casts \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/memref-reshape.mlir b/mlir/test/mlir-cpu-runner/memref-reshape.mlir --- a/mlir/test/mlir-cpu-runner/memref-reshape.mlir +++ b/mlir/test/mlir-cpu-runner/memref-reshape.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-scf-to-std -memref-expand -convert-arith-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts \ +// RUN: mlir-opt %s -convert-scf-to-cf -memref-expand -convert-arith-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir --- a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir +++ b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -convert-linalg-to-loops -lower-affine -convert-scf-to-std -convert-arith-to-llvm -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s +// RUN: mlir-opt -convert-linalg-to-loops -lower-affine -convert-scf-to-cf -convert-arith-to-llvm -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s func @main() { %A = memref.alloc() : memref<16x16xf32> diff --git 
a/mlir/test/mlir-cpu-runner/unranked-memref.mlir b/mlir/test/mlir-cpu-runner/unranked-memref.mlir --- a/mlir/test/mlir-cpu-runner/unranked-memref.mlir +++ b/mlir/test/mlir-cpu-runner/unranked-memref.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-arith-to-llvm \ // RUN: -convert-linalg-to-llvm \ // RUN: -convert-memref-to-llvm \ diff --git a/mlir/test/mlir-cpu-runner/utils.mlir b/mlir/test/mlir-cpu-runner/utils.mlir --- a/mlir/test/mlir-cpu-runner/utils.mlir +++ b/mlir/test/mlir-cpu-runner/utils.mlir @@ -1,7 +1,7 @@ -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D -// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-std -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s 
--check-prefix=PRINT-VECTOR-SPLAT-2D +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D +// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D func @print_0d() { %f = arith.constant 2.00000e+00 : f32 diff --git a/mlir/test/mlir-lsp-server/hover.test b/mlir/test/mlir-lsp-server/hover.test --- a/mlir/test/mlir-lsp-server/hover.test +++ b/mlir/test/mlir-lsp-server/hover.test @@ -5,7 +5,7 @@ "uri":"test:///foo.mlir", "languageId":"mlir", "version":1, - "text":"func @foo(%arg: i1) {\n%value = arith.constant true\nbr ^bb2\n^bb2:\nreturn\n}" + "text":"func @foo(%arg: i1) {\n%value = arith.constant true\ncf.br ^bb2\n^bb2:\nreturn\n}" }}} // ----- // Hover on an operation. 
diff --git a/mlir/test/mlir-opt/async.mlir b/mlir/test/mlir-opt/async.mlir --- a/mlir/test/mlir-opt/async.mlir +++ b/mlir/test/mlir-opt/async.mlir @@ -5,7 +5,7 @@ // RUN: -async-runtime-ref-counting-opt \ // RUN: -convert-async-to-llvm \ // RUN: -convert-linalg-to-loops \ -// RUN: -convert-scf-to-std \ +// RUN: -convert-scf-to-cf \ // RUN: -convert-linalg-to-llvm \ // RUN: -convert-memref-to-llvm \ // RUN: -convert-arith-to-llvm \ diff --git a/mlir/test/mlir-opt/commandline.mlir b/mlir/test/mlir-opt/commandline.mlir --- a/mlir/test/mlir-opt/commandline.mlir +++ b/mlir/test/mlir-opt/commandline.mlir @@ -9,6 +9,7 @@ // CHECK-NEXT: async // CHECK-NEXT: bufferization // CHECK-NEXT: builtin +// CHECK-NEXT: cf // CHECK-NEXT: complex // CHECK-NEXT: dlti // CHECK-NEXT: emitc diff --git a/mlir/test/mlir-reduce/multiple-function.mlir b/mlir/test/mlir-reduce/multiple-function.mlir --- a/mlir/test/mlir-reduce/multiple-function.mlir +++ b/mlir/test/mlir-reduce/multiple-function.mlir @@ -22,12 +22,12 @@ // CHECK-NOT: func @simple4() { func @simple4(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> - br ^bb3(%0 : memref<2xf32>) + cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): "test.op_crash"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> () return diff --git a/mlir/test/mlir-reduce/simple-test.mlir b/mlir/test/mlir-reduce/simple-test.mlir --- a/mlir/test/mlir-reduce/simple-test.mlir +++ b/mlir/test/mlir-reduce/simple-test.mlir @@ -2,12 +2,12 @@ // RUN: mlir-reduce %s -reduction-tree='traversal-mode=0 test=%S/test.sh' func @simple1(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) { - cond_br %arg0, ^bb1, ^bb2 + cf.cond_br %arg0, ^bb1, ^bb2 ^bb1: - br ^bb3(%arg1 : memref<2xf32>) + cf.br ^bb3(%arg1 : memref<2xf32>) ^bb2: %0 = memref.alloc() : memref<2xf32> - br ^bb3(%0 : memref<2xf32>) + 
cf.br ^bb3(%0 : memref<2xf32>) ^bb3(%1: memref<2xf32>): return } diff --git a/mlir/test/python/execution_engine.py b/mlir/test/python/execution_engine.py --- a/mlir/test/python/execution_engine.py +++ b/mlir/test/python/execution_engine.py @@ -276,28 +276,28 @@ %c0 = arith.constant 0 : index %c2 = arith.constant 2 : index %c1 = arith.constant 1 : index - br ^bb1(%c0 : index) + cf.br ^bb1(%c0 : index) ^bb1(%0: index): // 2 preds: ^bb0, ^bb5 %1 = arith.cmpi slt, %0, %c2 : index - cond_br %1, ^bb2, ^bb6 + cf.cond_br %1, ^bb2, ^bb6 ^bb2: // pred: ^bb1 %c0_0 = arith.constant 0 : index %c2_1 = arith.constant 2 : index %c1_2 = arith.constant 1 : index - br ^bb3(%c0_0 : index) + cf.br ^bb3(%c0_0 : index) ^bb3(%2: index): // 2 preds: ^bb2, ^bb4 %3 = arith.cmpi slt, %2, %c2_1 : index - cond_br %3, ^bb4, ^bb5 + cf.cond_br %3, ^bb4, ^bb5 ^bb4: // pred: ^bb3 %4 = memref.load %arg0[%0, %2] : memref<2x2xf32> %5 = memref.load %arg1[%0, %2] : memref %6 = arith.addf %4, %5 : f32 memref.store %6, %arg2[%0, %2] : memref<2x2xf32> %7 = arith.addi %2, %c1_2 : index - br ^bb3(%7 : index) + cf.br ^bb3(%7 : index) ^bb5: // pred: ^bb3 %8 = arith.addi %0, %c1 : index - br ^bb1(%8 : index) + cf.br ^bb1(%8 : index) ^bb6: // pred: ^bb1 return } diff --git a/mlir/test/python/integration/dialects/linalg/opsrun.py b/mlir/test/python/integration/dialects/linalg/opsrun.py --- a/mlir/test/python/integration/dialects/linalg/opsrun.py +++ b/mlir/test/python/integration/dialects/linalg/opsrun.py @@ -128,7 +128,7 @@ boilerplate) pm = PassManager.parse( "builtin.func(convert-linalg-to-loops, lower-affine, " + - "convert-scf-to-std, arith-expand, memref-expand), convert-vector-to-llvm," + + "convert-scf-to-cf, arith-expand, memref-expand), convert-vector-to-llvm," + "convert-memref-to-llvm, convert-std-to-llvm," + "reconcile-unrealized-casts") pm.run(mod) diff --git a/mlir/test/python/ir/blocks.py b/mlir/test/python/ir/blocks.py --- a/mlir/test/python/ir/blocks.py +++ b/mlir/test/python/ir/blocks.py @@ 
-19,9 +19,9 @@ # CHECK-LABEL: TEST: testBlockCreation # CHECK: func @test(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i16) -# CHECK: br ^bb1(%[[ARG1]] : i16) +# CHECK: cf.br ^bb1(%[[ARG1]] : i16) # CHECK: ^bb1(%[[PHI0:.*]]: i16): -# CHECK: br ^bb2(%[[ARG0]] : i32) +# CHECK: cf.br ^bb2(%[[ARG0]] : i32) # CHECK: ^bb2(%[[PHI1:.*]]: i32): # CHECK: return @run diff --git a/mlir/test/python/ir/dialects.py b/mlir/test/python/ir/dialects.py --- a/mlir/test/python/ir/dialects.py +++ b/mlir/test/python/ir/dialects.py @@ -100,7 +100,7 @@ def testIsRegisteredOperation(): ctx = Context() - # CHECK: std.cond_br: True - print(f"std.cond_br: {ctx.is_registered_operation('std.cond_br')}") + # CHECK: cf.cond_br: True + print(f"cf.cond_br: {ctx.is_registered_operation('cf.cond_br')}") # CHECK: std.not_existing: False print(f"std.not_existing: {ctx.is_registered_operation('std.not_existing')}")