diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
@@ -56,7 +56,7 @@
 //
 //     %num_elements = arith.constant 256
 //     %idx = arith.constant 0 : index
-//     %tag = alloc() : memref<1xi32, 4>
+//     %tag = memref.alloc() : memref<1xi32, 4>
 //     affine.dma_start %src[%i + 3, %j], %dst[%k + 7, %l], %tag[%idx],
 //       %num_elements :
 //         memref<40x128xf32, 0>, memref<2x1024xf32, 1>, memref<1xi32, 2>
diff --git a/mlir/include/mlir/Dialect/Affine/Passes.td b/mlir/include/mlir/Dialect/Affine/Passes.td
--- a/mlir/include/mlir/Dialect/Affine/Passes.td
+++ b/mlir/include/mlir/Dialect/Affine/Passes.td
@@ -107,7 +107,7 @@
     ```mlir
     func @store_load_affine_apply() -> memref<10x10xf32> {
       %cf7 = arith.constant 7.0 : f32
-      %m = alloc() : memref<10x10xf32>
+      %m = memref.alloc() : memref<10x10xf32>
       affine.for %i0 = 0 to 10 {
         affine.for %i1 = 0 to 10 {
           affine.store %cf7, %m[%i0, %i1] : memref<10x10xf32>
@@ -125,7 +125,7 @@
     module {
       func @store_load_affine_apply() -> memref<10x10xf32> {
         %cst = arith.constant 7.000000e+00 : f32
-        %0 = alloc() : memref<10x10xf32>
+        %0 = memref.alloc() : memref<10x10xf32>
         affine.for %arg0 = 0 to 10 {
           affine.for %arg1 = 0 to 10 {
             affine.store %cst, %0[%arg0, %arg1] : memref<10x10xf32>
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -30,7 +30,7 @@
     ^bb1:
       br ^bb3(%arg1 : memref<2xf32>)
     ^bb2:
-      %0 = alloc() : memref<2xf32>
+      %0 = memref.alloc() : memref<2xf32>
       linalg.generic {
         args_in = 1 : i64,
         args_out = 1 : i64,
@@ -57,11 +57,11 @@
     func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
       cond_br %arg0, ^bb1, ^bb2
     ^bb1: // pred: ^bb0
-      %0 = alloc() : memref<2xf32>
+      %0 = memref.alloc() : memref<2xf32>
       linalg.copy(%arg1, %0) : memref<2xf32>, memref<2xf32>
       br ^bb3(%0 : memref<2xf32>)
     ^bb2: // pred: ^bb0
-      %1 = alloc() : memref<2xf32>
+      %1 = memref.alloc() : memref<2xf32>
       linalg.generic {
         args_in = 1 : i64,
         args_out = 1 : i64,
@@ -71,7 +71,7 @@
         %4 = exp %arg3 : f32
         linalg.yield %4 : f32
       }: memref<2xf32>, memref<2xf32>
-      %2 = alloc() : memref<2xf32>
+      %2 = memref.alloc() : memref<2xf32>
       linalg.copy(%1, %2) : memref<2xf32>, memref<2xf32>
       dealloc %1 : memref<2xf32>
       br ^bb3(%2 : memref<2xf32>)
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -519,7 +519,7 @@
     ```mlir
     %num_elements = arith.constant 256
     %idx = arith.constant 0 : index
-    %tag = alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 4>
+    %tag = memref.alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 4>
     dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx] :
       memref<40 x 128 x f32>, affine_map<(d0) -> (d0)>, 0>,
       memref<2 x 1024 x f32>, affine_map<(d0) -> (d0)>, 1>,
@@ -1561,7 +1561,7 @@

     ```mlir
     %9 = dim %8, 1 : tensor<4x?xf32>
-    %10 = alloc(%9) : memref<4x?xf32, #layout, memspace0>
+    %10 = memref.alloc(%9) : memref<4x?xf32, #layout, memspace0>
     memref.tensor_store %8, %10 : memref<4x?xf32, #layout, memspace0>
     ```
   }];
diff --git a/mlir/include/mlir/Dialect/Vector/VectorOps.td b/mlir/include/mlir/Dialect/Vector/VectorOps.td
--- a/mlir/include/mlir/Dialect/Vector/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/VectorOps.td
@@ -2083,7 +2083,7 @@
     Example:

     ```mlir
-    %A = alloc() : memref<5x4x3xf32>
+    %A = memref.alloc() : memref<5x4x3xf32>
     %VA = vector.type_cast %A : memref<5x4x3xf32> to memref<vector<5x4x3xf32>>
     ```
   }];
diff --git a/mlir/include/mlir/Transforms/Passes.td b/mlir/include/mlir/Transforms/Passes.td
--- a/mlir/include/mlir/Transforms/Passes.td
+++ b/mlir/include/mlir/Transforms/Passes.td
@@ -37,8 +37,8 @@
     Input:
     ```mlir
     func @producer_consumer_fusion(%arg0: memref<10xf32>, %arg1: memref<10xf32>) {
-      %0 = alloc() : memref<10xf32>
-      %1 = alloc() : memref<10xf32>
+      %0 = memref.alloc() : memref<10xf32>
+      %1 = memref.alloc() : memref<10xf32>
       %cst = arith.constant 0.000000e+00 : f32
       affine.for %arg2 = 0 to 10 {
         affine.store %cst, %0[%arg2] : memref<10xf32>
@@ -60,8 +60,8 @@
     Output:
     ```mlir
     func @producer_consumer_fusion(%arg0: memref<10xf32>, %arg1: memref<10xf32>) {
-      %0 = alloc() : memref<1xf32>
-      %1 = alloc() : memref<1xf32>
+      %0 = memref.alloc() : memref<1xf32>
+      %1 = memref.alloc() : memref<1xf32>
       %cst = arith.constant 0.000000e+00 : f32
       affine.for %arg2 = 0 to 10 {
         affine.store %cst, %0[0] : memref<1xf32>
@@ -161,9 +161,9 @@

     ```mlir
     func @pipelinedatatransfer() {
-      %0 = alloc() : memref<256xf32>
-      %1 = alloc() : memref<32xf32, 1>
-      %2 = alloc() : memref<1xf32>
+      %0 = memref.alloc() : memref<256xf32>
+      %1 = memref.alloc() : memref<32xf32, 1>
+      %2 = memref.alloc() : memref<1xf32>
       %c0 = arith.constant 0 : index
       %c128 = arith.constant 128 : index
       affine.for %i0 = 0 to 8 {
@@ -184,11 +184,11 @@
     func @pipelinedatatransfer() {
       %c8 = arith.constant 8 : index
       %c0 = arith.constant 0 : index
-      %0 = alloc() : memref<256xf32>
+      %0 = memref.alloc() : memref<256xf32>
       %c0_0 = arith.constant 0 : index
       %c128 = arith.constant 128 : index
-      %1 = alloc() : memref<2x32xf32, 1>
-      %2 = alloc() : memref<2x1xf32>
+      %1 = memref.alloc() : memref<2x32xf32, 1>
+      %2 = memref.alloc() : memref<2x1xf32>
       affine.dma_start %0[%c0], %1[%c0 mod 2, %c0], %2[%c0 mod 2, symbol(%c0_0)], %c128 : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
       affine.for %arg0 = 1 to 8 {
         affine.dma_start %0[%arg0], %1[%arg0 mod 2, %arg0], %2[%arg0 mod 2, symbol(%c0_0)], %c128 : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
@@ -207,8 +207,8 @@
         %6 = affine.load %1[%3 mod 2, %3] : memref<2x32xf32, 1>
         %7 = "compute"(%6) : (f32) -> f32
         affine.store %7, %1[%3 mod 2, %3] : memref<2x32xf32, 1>
-      dealloc %2 : memref<2x1xf32>
-      dealloc %1 : memref<2x32xf32, 1>
+      memref.dealloc %2 : memref<2x1xf32>
+      memref.dealloc %1 : memref<2x32xf32, 1>
       return
     }
   }
@@ -435,7 +435,7 @@
         %p = arith.mulf %a, %a : f64
         affine.store %p, %A[%arg3] : memref<16xf64, #tile>
       }
-      %c = alloc() : memref<16xf64, #tile>
+      %c = memref.alloc() : memref<16xf64, #tile>
       %d = affine.load %c[0] : memref<16xf64, #tile>
       return %A: memref<16xf64, #tile>
     }
@@ -451,7 +451,7 @@
         %4 = arith.mulf %3, %3 : f64
         affine.store %4, %arg0[%arg3 floordiv 4, %arg3 mod 4]: memref<4x4xf64>
       }
-      %0 = alloc() : memref<4x4xf64>
+      %0 = memref.alloc() : memref<4x4xf64>
       %1 = affine.apply #map1()
       %2 = affine.load %0[0, 0] : memref<4x4xf64>
       return %arg0 : memref<4x4xf64>
diff --git a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -388,9 +388,9 @@
 /// produces this standard innermost-loop vectorized code:
 /// ```mlir
 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
-///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
-///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
-///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
+///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
+///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
+///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
 ///   %cst = arith.constant 1.0 : f32
 ///   %cst_0 = arith.constant 2.0 : f32
 ///   affine.for %i0 = 0 to %arg0 {
@@ -442,9 +442,9 @@
 /// produces this more interesting mixed outer-innermost-loop vectorized code:
 /// ```mlir
 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
-///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
-///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
-///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
+///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
+///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
+///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
 ///   %cst = arith.constant 1.0 : f32
 ///   %cst_0 = arith.constant 2.0 : f32
 ///   affine.for %i0 = 0 to %arg0 step 32 {