diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
@@ -63,7 +63,7 @@
 
 static bool isMemRefDereferencingOp(Operation &op) {
   // TODO: Support DMA Ops.
-  return isa<AffineLoadOp, AffineStoreOp>(op);
+  return isa<AffineReadOpInterface, AffineWriteOpInterface>(op);
 }
 
 // Returns true if the individual op is loop invariant.
@@ -85,9 +85,9 @@
     return false;
   } else if (!isa<ConstantOp>(op)) {
     if (isMemRefDereferencingOp(op)) {
-      Value memref = isa<AffineLoadOp>(op)
-                         ? cast<AffineLoadOp>(op).getMemRef()
-                         : cast<AffineStoreOp>(op).getMemRef();
+      Value memref = isa<AffineReadOpInterface>(op)
+                         ? cast<AffineReadOpInterface>(op).getMemRef()
+                         : cast<AffineWriteOpInterface>(op).getMemRef();
       for (auto *user : memref.getUsers()) {
         // If this memref has a user that is a DMA, give up because these
         // operations write to this memref.
@@ -97,8 +97,9 @@
         // If the memref used by the load/store is used in a store elsewhere in
         // the loop nest, we do not hoist. Similarly, if the memref used in a
         // load is also being stored too, we do not hoist the load.
-        if (isa<AffineStoreOp>(user) ||
-            (isa<AffineLoadOp>(user) && isa<AffineStoreOp>(op))) {
+        if (isa<AffineWriteOpInterface>(user) ||
+            (isa<AffineReadOpInterface>(user) &&
+             isa<AffineWriteOpInterface>(op))) {
           if (&op != user) {
             SmallVector<AffineForOp, 8> userIVs;
             getLoopIVs(*user, &userIVs);
diff --git a/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir b/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
--- a/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
+++ b/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
@@ -505,3 +505,51 @@
   return
 }
+
+// -----
+
+// CHECK-LABEL: func @vector_loop_nothing_invariant
+func @vector_loop_nothing_invariant() {
+  %m1 = alloc() : memref<40xf32>
+  %m2 = alloc() : memref<40xf32>
+  affine.for %arg0 = 0 to 10 {
+    %v0 = affine.vector_load %m1[%arg0*4] : memref<40xf32>, vector<4xf32>
+    %v1 = affine.vector_load %m2[%arg0*4] : memref<40xf32>, vector<4xf32>
+    %v2 = addf %v0, %v1 : vector<4xf32>
+    affine.vector_store %v2, %m1[%arg0*4] : memref<40xf32>, vector<4xf32>
+  }
+  return
+}
+
+// CHECK: affine.for
+// CHECK-NEXT: affine.vector_load
+// CHECK-NEXT: affine.vector_load
+// CHECK-NEXT: addf
+// CHECK-NEXT: affine.vector_store
+// CHECK-NEXT: }
+
+// -----
+
+// CHECK-LABEL: func @vector_loop_all_invariant
+func @vector_loop_all_invariant() {
+  %m1 = alloc() : memref<4xf32>
+  %m2 = alloc() : memref<4xf32>
+  %m3 = alloc() : memref<4xf32>
+  affine.for %arg0 = 0 to 10 {
+    %v0 = affine.vector_load %m1[0] : memref<4xf32>, vector<4xf32>
+    %v1 = affine.vector_load %m2[0] : memref<4xf32>, vector<4xf32>
+    %v2 = addf %v0, %v1 : vector<4xf32>
+    affine.vector_store %v2, %m3[0] : memref<4xf32>, vector<4xf32>
+  }
+  return
+}
+
+// CHECK: alloc()
+// CHECK-NEXT: alloc()
+// CHECK-NEXT: alloc()
+// CHECK-NEXT: affine.vector_load
+// CHECK-NEXT: affine.vector_load
+// CHECK-NEXT: addf
+// CHECK-NEXT: affine.vector_store
+// CHECK-NEXT: affine.for
+// CHECK-NEXT: }
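
A quick way to exercise the new test cases locally (a sketch, not part of the patch; it assumes the test file's existing RUN line drives the pass via the -affine-loop-invariant-code-motion and -split-input-file flags, matching the pass registration name):

  # Assumed invocation; adjust the flag name if the pass is registered differently.
  mlir-opt -affine-loop-invariant-code-motion -split-input-file \
      mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir

  // Expected shape of @vector_loop_all_invariant after the pass, as encoded by
  // the CHECK lines above: every op is hoisted and the loop body becomes empty.
  // SSA value names are illustrative.
  %0 = alloc() : memref<4xf32>
  %1 = alloc() : memref<4xf32>
  %2 = alloc() : memref<4xf32>
  %3 = affine.vector_load %0[0] : memref<4xf32>, vector<4xf32>
  %4 = affine.vector_load %1[0] : memref<4xf32>, vector<4xf32>
  %5 = addf %3, %4 : vector<4xf32>
  affine.vector_store %5, %2[0] : memref<4xf32>, vector<4xf32>
  affine.for %arg0 = 0 to 10 {
  }
  return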