diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -107,7 +107,7 @@
 }
 
 def AffineForOp : Affine_Op<"for",
-    [ImplicitAffineTerminator, RecursiveSideEffects,
+    [AutomaticAllocationScope, ImplicitAffineTerminator, RecursiveSideEffects,
      DeclareOpInterfaceMethods<LoopLikeOpInterface>]> {
   let summary = "for operation";
   let description = [{
@@ -608,7 +608,7 @@
 }
 
 def AffineParallelOp : Affine_Op<"parallel",
-    [ImplicitAffineTerminator, RecursiveSideEffects,
+    [AutomaticAllocationScope, ImplicitAffineTerminator, RecursiveSideEffects,
      DeclareOpInterfaceMethods<LoopLikeOpInterface>, MemRefsNormalizable]> {
   let summary = "multi-index parallel band operation";
   let description = [{
diff --git a/mlir/include/mlir/Dialect/GPU/GPUOps.td b/mlir/include/mlir/Dialect/GPU/GPUOps.td
--- a/mlir/include/mlir/Dialect/GPU/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/GPUOps.td
@@ -439,7 +439,7 @@
   let hasVerifier = 1;
 }
 
-def GPU_LaunchOp : GPU_Op<"launch">,
+def GPU_LaunchOp : GPU_Op<"launch", [AutomaticAllocationScope]>,
     Arguments<(ins Index:$gridSizeX, Index:$gridSizeY, Index:$gridSizeZ,
                Index:$blockSizeX, Index:$blockSizeY, Index:$blockSizeZ,
                Optional<I32>:$dynamicSharedMemorySize)>,
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -80,7 +80,8 @@
 def ClauseDefaultAttr : EnumAttr<OpenMP_Dialect, ClauseDefault,
                                  "clause_default">;
 
-def ParallelOp : OpenMP_Op<"parallel", [AttrSizedOperandSegments,
+def ParallelOp : OpenMP_Op<"parallel", [AutomaticAllocationScope,
+                 AttrSizedOperandSegments,
                  DeclareOpInterfaceMethods<OutlineableOpenMPOpInterface>]> {
   let summary = "parallel construct";
   let description = [{
diff --git a/mlir/include/mlir/Dialect/SCF/SCFOps.td b/mlir/include/mlir/Dialect/SCF/SCFOps.td
--- a/mlir/include/mlir/Dialect/SCF/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/SCFOps.td
@@ -110,7 +110,7 @@
 }
 
 def ForOp : SCF_Op<"for",
-      [DeclareOpInterfaceMethods<LoopLikeOpInterface>,
+      [AutomaticAllocationScope, DeclareOpInterfaceMethods<LoopLikeOpInterface>,
        DeclareOpInterfaceMethods<RegionBranchOpInterface>,
        SingleBlockImplicitTerminator<"scf::YieldOp">,
        RecursiveSideEffects]> {
@@ -404,7 +404,8 @@
 }
 
 def ParallelOp : SCF_Op<"parallel",
-    [AttrSizedOperandSegments,
+    [AutomaticAllocationScope,
+     AttrSizedOperandSegments,
      DeclareOpInterfaceMethods<LoopLikeOpInterface>,
      RecursiveSideEffects,
      SingleBlockImplicitTerminator<"scf::YieldOp">]> {
diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
--- a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
@@ -83,7 +83,6 @@
 // CHECK-LABEL: func @materialize_read(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
 func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
   %f0 = arith.constant 0.0: f32
-  // CHECK-DAG: %[[ALLOC:.*]] = memref.alloca() : memref<vector<5x4x3xf32>>
   // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
   // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
   // CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
@@ -94,6 +93,7 @@
   // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} {
   // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
   // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
+  // CHECK: %[[ALLOC:.*]] = memref.alloca() : memref<vector<5x4x3xf32>>
   // CHECK: scf.for %[[I4:.*]] = %[[C0]] to %[[C5]] step %[[C1]] {
   // CHECK: scf.if
   // CHECK: %[[L3:.*]] = affine.apply #[[$ADD]](%[[I3]], %[[I4]])
@@ -149,7 +149,6 @@
 
 // CHECK-LABEL:func @materialize_write(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
 func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
-  // CHECK-DAG: %[[ALLOC:.*]] = memref.alloca() : memref<vector<5x4x3xf32>>
   // CHECK-DAG: %{{.*}} = arith.constant dense<1.000000e+00> : vector<5x4x3xf32>
   // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
   // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -161,6 +160,7 @@
   // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} step 4 {
   // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
   // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
+  // CHECK: %[[ALLOC:.*]] = memref.alloca() : memref<vector<5x4x3xf32>>
   // CHECK: memref.store %{{.*}}, %[[ALLOC]][] : memref<vector<5x4x3xf32>>
   // CHECK: %[[VECTOR_VIEW1:.*]] = vector.type_cast %[[ALLOC]] : memref<vector<5x4x3xf32>> to memref<5xvector<4x3xf32>>
   // CHECK: scf.for %[[I4:.*]] = %[[C0]] to %[[C5]] step %[[C1]] {
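
Note: AutomaticAllocationScope marks each of these loop body regions as an
automatic allocation scope, so a memref.alloca inside the body is freed when
control leaves the region, and alloca placement now stops at the loop rather
than the enclosing function. That is why the %[[ALLOC]] checks in
vector-to-scf.mlir move from the function entry into the innermost loop nest.
A minimal sketch of the resulting behavior, using hypothetical names
(@iteration_scoped_alloca, %buf):

  func @iteration_scoped_alloca(%n: index, %v: f32) {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    scf.for %i = %c0 to %n step %c1 {
      // With AutomaticAllocationScope on scf.for, this buffer is scoped to a
      // single iteration: its stack space is reclaimed when the body region
      // exits, instead of the alloca being hoisted to the function entry
      // block and outliving every iteration.
      %buf = memref.alloca() : memref<4xf32>
      memref.store %v, %buf[%c0] : memref<4xf32>
    }
    return
  }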