diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -2943,8 +2943,10 @@

     // Field 1: Copy the allocated pointer, used for malloc/free.
     Value allocatedPtr = sourceMemRef.allocatedPtr(rewriter, loc);
+    auto memRefType = viewOp.source().getType().cast<MemRefType>();
     Value bitcastPtr = rewriter.create<LLVM::BitcastOp>(
-        loc, targetElementTy.getPointerTo(), allocatedPtr);
+        loc, targetElementTy.getPointerTo(memRefType.getMemorySpace()),
+        allocatedPtr);
     targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr);
 
     // Field 2: Copy the actual aligned pointer to payload.
@@ -2952,7 +2954,8 @@
     alignedPtr = rewriter.create<LLVM::GEPOp>(loc, alignedPtr.getType(),
                                               alignedPtr, adaptor.byte_shift());
     bitcastPtr = rewriter.create<LLVM::BitcastOp>(
-        loc, targetElementTy.getPointerTo(), alignedPtr);
+        loc, targetElementTy.getPointerTo(memRefType.getMemorySpace()),
+        alignedPtr);
     targetMemRef.setAlignedPtr(rewriter, loc, bitcastPtr);
 
     // Field 3: The offset in the resulting type must be 0. This is because of
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
--- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
@@ -824,6 +824,28 @@
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   %5 = view %0[%arg2][] : memref<2048xi8> to memref<64x4xf32>
 
+  // Test view address space.
+  // CHECK: llvm.mlir.constant(2048 : index) : !llvm.i64
+  // CHECK: llvm.mlir.undef : !llvm<"{ i8 addrspace(4)*, i8 addrspace(4)*, i64, [1 x i64], [1 x i64] }">
+  %6 = alloc() : memref<2048xi8, 4>
+
+  // CHECK: llvm.mlir.undef : !llvm<"{ float addrspace(4)*, float addrspace(4)*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: %[[BASE_PTR_4:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8 addrspace(4)*, i8 addrspace(4)*, i64, [1 x i64], [1 x i64] }">
+  // CHECK: %[[SHIFTED_BASE_PTR_4:.*]] = llvm.getelementptr %[[BASE_PTR_4]][%[[ARG2]]] : (!llvm<"i8 addrspace(4)*">, !llvm.i64) -> !llvm<"i8 addrspace(4)*">
+  // CHECK: %[[CAST_SHIFTED_BASE_PTR_4:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_4]] : !llvm<"i8 addrspace(4)*"> to !llvm<"float addrspace(4)*">
+  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_4]], %{{.*}}[1] : !llvm<"{ float addrspace(4)*, float addrspace(4)*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: %[[C0_4:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+  // CHECK: llvm.insertvalue %[[C0_4]], %{{.*}}[2] : !llvm<"{ float addrspace(4)*, float addrspace(4)*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm<"{ float addrspace(4)*, float addrspace(4)*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm<"{ float addrspace(4)*, float addrspace(4)*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: llvm.mlir.constant(64 : index) : !llvm.i64
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm<"{ float addrspace(4)*, float addrspace(4)*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float addrspace(4)*, float addrspace(4)*, i64, [2 x i64], [2 x i64] }">
+  %7 = view %6[%arg2][] : memref<2048xi8, 4> to memref<64x4xf32, 4>
+
   return
 }
 