diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -1173,6 +1173,12 @@
 /// Returns true if 'forOp' is parallel.
 bool mlir::isLoopParallel(AffineForOp forOp) {
+  // Loop is not parallel if it has SSA loop-carried dependences.
+  // TODO: Conditionally support reductions and other loop-carried dependences
+  // that could be handled in the context of a parallel loop.
+  if (forOp.getNumIterOperands() > 0)
+    return false;
+
   // Collect all load and store ops in loop nest rooted at 'forOp'.
   SmallVector<Operation *, 8> loadAndStoreOpInsts;
   auto walkResult = forOp.walk([&](Operation *opInst) -> WalkResult {
diff --git a/mlir/test/Dialect/Affine/parallelize.mlir b/mlir/test/Dialect/Affine/parallelize.mlir
--- a/mlir/test/Dialect/Affine/parallelize.mlir
+++ b/mlir/test/Dialect/Affine/parallelize.mlir
@@ -159,4 +159,14 @@
   return
 }
-
+// CHECK-LABEL: @unsupported_iter_args
+func @unsupported_iter_args(%arg0: memref<10xf32>) {
+  %cst = constant 0.000000e+00 : f32
+  // CHECK-NOT: affine.parallel
+  %final_red = affine.for %arg4 = 0 to 10 iter_args(%red_iter = %cst) -> (f32) {
+    %5 = affine.load %arg0[%arg4] : memref<10xf32>
+    %6 = addf %red_iter, %5 : f32
+    affine.yield %6 : f32
+  }
+  return
+}
 