diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -35,6 +35,8 @@
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/Passes.h"
 
+#define ALIGNMENT_SIZE 128
+
 using namespace mlir;
 using namespace mlir::edsc;
 using namespace mlir::edsc::intrinsics;
@@ -232,8 +234,8 @@
       op->getParentWithTrait<OpTrait::AutomaticAllocationScope>();
   assert(scope && "Expected op to be inside automatic allocation scope");
   b.setInsertionPointToStart(&scope->getRegion(0).front());
-  Value res =
-      std_alloca(memRefMinorVectorType, ValueRange{}, b.getI64IntegerAttr(128));
+  Value res = std_alloca(memRefMinorVectorType, ValueRange{},
+                         b.getI64IntegerAttr(ALIGNMENT_SIZE));
   return res;
 }
 
@@ -575,7 +577,8 @@
     steps.push_back(std_constant_index(step));
 
   // 2. Emit alloc-copy-load-dealloc.
-  Value tmp = std_alloc(tmpMemRefType(transfer));
+  Value tmp = std_alloc(tmpMemRefType(transfer), ValueRange{},
+                        rewriter.getI64IntegerAttr(ALIGNMENT_SIZE));
   StdIndexedValue local(tmp);
   Value vec = vector_type_cast(tmp);
   loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
@@ -648,7 +651,8 @@
     steps.push_back(std_constant_index(step));
 
   // 2. Emit alloc-store-copy-dealloc.
-  Value tmp = std_alloc(tmpMemRefType(transfer));
+  Value tmp = std_alloc(tmpMemRefType(transfer), ValueRange{},
+                        rewriter.getI64IntegerAttr(ALIGNMENT_SIZE));
   StdIndexedValue local(tmp);
   Value vec = vector_type_cast(tmp);
   std_store(vectorValue, vec);
diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
--- a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
@@ -68,7 +68,7 @@
   // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} {
   // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
   // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
-  // CHECK:      %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
+  // CHECK:      %[[ALLOC:.*]] = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>
   // CHECK-NEXT: scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {
   // CHECK-NEXT: scf.for %[[I5:.*]] = %[[C0]] to %[[C4]] step %[[C1]] {
   // CHECK-NEXT: scf.for %[[I6:.*]] = %[[C0]] to %[[C5]] step %[[C1]] {
@@ -145,7 +145,7 @@
   // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} step 4 {
   // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
   // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
-  // CHECK:      %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
+  // CHECK:      %[[ALLOC:.*]] = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>
   // CHECK-NEXT: %[[VECTOR_VIEW:.*]] = vector.type_cast {{.*}} : memref<5x4x3xf32>
   // CHECK:      store %{{.*}}, {{.*}} : memref<vector<5x4x3xf32>>
   // CHECK-NEXT: scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {
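
Note: a minimal sketch of the IR this patch produces, reconstructed from the
updated FileCheck lines above; the SSA value names (%tmp, %view, %vec) are
illustrative, not taken from actual pass output. The temporary buffer now
carries an explicit 128-byte alignment attribute:

  %tmp = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>
  %view = vector.type_cast %tmp : memref<5x4x3xf32> to memref<vector<5x4x3xf32>>
  store %vec, %view[] : memref<vector<5x4x3xf32>>

The std_alloca path already requested alignment 128; this change applies the
same alignment to the std_alloc temporaries and hoists the shared constant
into the ALIGNMENT_SIZE macro.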