diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -791,6 +791,16 @@
   return TransferWriteOpOperandAdaptor(operands);
 }
 
+bool isMinorIdentity(AffineMap map, unsigned rank) {
+  if (map.getNumResults() < rank)
+    return false;
+  unsigned startDim = map.getNumDims() - rank;
+  for (unsigned i = 0; i < rank; ++i)
+    if (map.getResult(i) != getAffineDimExpr(startDim + i, map.getContext()))
+      return false;
+  return true;
+}
+
 /// Conversion pattern that converts a 1-D vector transfer read/write op in a
 /// sequence of:
 /// 1. Bitcast to vector form.
@@ -810,9 +820,12 @@
                   ConversionPatternRewriter &rewriter) const override {
     auto xferOp = cast<ConcreteOp>(op);
     auto adaptor = getTransferOpAdapter(xferOp, operands);
-    if (xferOp.getMemRefType().getRank() != 1)
+
+    if (xferOp.getVectorType().getRank() > 1 ||
+        llvm::size(xferOp.indices()) == 0)
       return failure();
-    if (!xferOp.permutation_map().isIdentity())
+    if (!isMinorIdentity(xferOp.permutation_map(),
+                         xferOp.getVectorType().getRank()))
       return failure();
 
     auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
@@ -844,17 +857,18 @@
         loc, toLLVMTy(vectorCmpType), linearIndices);
 
     // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
-    Value offsetIndex = *(xferOp.indices().begin());
-    offsetIndex = rewriter.create<IndexCastOp>(
-        loc, vectorCmpType.getElementType(), offsetIndex);
+    // TODO(ntv, ajcbik): when the leaf transfer rank is k > 1 we need the last
+    // `k` dimensions here.
+    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
+    Value offsetIndex = *(xferOp.indices().begin() + lastIndex);
+    offsetIndex = rewriter.create<IndexCastOp>(loc, i64Type, offsetIndex);
     Value base = rewriter.create<SplatOp>(loc, vectorCmpType, offsetIndex);
     Value offsetVector = rewriter.create<AddIOp>(loc, base, linearIndices);
 
     // 4. Let dim the memref dimension, compute the vector comparison mask:
     //    [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
-    Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), 0);
-    dim =
-        rewriter.create<IndexCastOp>(loc, vectorCmpType.getElementType(), dim);
+    Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), lastIndex);
+    dim = rewriter.create<IndexCastOp>(loc, i64Type, dim);
     dim = rewriter.create<SplatOp>(loc, vectorCmpType, dim);
     Value mask = rewriter.create<CmpIOp>(loc, CmpIPredicate::slt,
                                          offsetVector, dim);
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -828,3 +828,39 @@
 // CHECK: llvm.intr.masked.store %[[loaded]], %[[vecPtr_b]], %[[mask_b]]
 // CHECK-SAME: {alignment = 1 : i32} :
 // CHECK-SAME: !llvm<"<17 x float>">, !llvm<"<17 x i1>"> into !llvm<"<17 x float>*">
+
+func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index) -> vector<17xf32> {
+  %f7 = constant 7.0: f32
+  %f = vector.transfer_read %A[%base0, %base1], %f7
+    {permutation_map = affine_map<(d0, d1) -> (d1)>} :
+    memref<?x?xf32>, vector<17xf32>
+  return %f: vector<17xf32>
+}
+// CHECK-LABEL: func @transfer_read_2d_to_1d
+// CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: !llvm.i64, %[[BASE_1:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm<"<17 x float>">
+//
+// Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
+// CHECK: %[[offsetVec:.*]] = llvm.mlir.undef : !llvm<"<17 x i64>">
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// Here we check we properly use %BASE_1
+// CHECK: %[[offsetVec2:.*]] = llvm.insertelement %[[BASE_1]], %[[offsetVec]][%[[c0]] :
+// CHECK-SAME: !llvm.i32] : !llvm<"<17 x i64>">
+// CHECK: %[[offsetVec3:.*]] = llvm.shufflevector %[[offsetVec2]], %{{.*}} [
+// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
+// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
+// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32] :
+//
+// Let dim the memref dimension, compute the vector comparison mask:
+// [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
+// Here we check we properly use %DIM[1]
+// CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 1] :
+// CHECK-SAME: !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
+// CHECK: %[[dimVec:.*]] = llvm.mlir.undef : !llvm<"<17 x i64>">
+// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[dimVec2:.*]] = llvm.insertelement %[[DIM]], %[[dimVec]][%[[c01]] :
+// CHECK-SAME: !llvm.i32] : !llvm<"<17 x i64>">
+// CHECK: %[[dimVec3:.*]] = llvm.shufflevector %[[dimVec2]], %{{.*}} [
+// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
+// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
+// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32] :
+// CHECK-SAME: !llvm<"<17 x i64>">, !llvm<"<17 x i64>">