Index: mlir/test/Dialect/LLVM/lower-to-llvm-e2e.mlir
===================================================================
--- mlir/test/Dialect/LLVM/lower-to-llvm-e2e.mlir
+++ mlir/test/Dialect/LLVM/lower-to-llvm-e2e.mlir
@@ -6,8 +6,8 @@
 func.func @subview(%0 : memref<64x4xf32, strided<[4, 1], offset: 0>>, %arg0 : index, %arg1 : index, %arg2 : index) -> memref<?x?xf32, strided<[?, ?], offset: ?>> {
   // CHECK-LABEL: @subview
-  // CHECK-SAME: %[[BASE:[^:]*]]: !llvm.ptr<f32>,
-  // CHECK-SAME: %[[BASE_ALIGNED:[^:]*]]: !llvm.ptr<f32>,
+  // CHECK-SAME: %[[BASE:[^:]*]]: !llvm.ptr
+  // CHECK-SAME: %[[BASE_ALIGNED:[^:]*]]: !llvm.ptr,
   // CHECK-SAME: %[[BASE_OFFSET:[^:]*]]: i64,
   // CHECK-SAME: %[[BASE_STRIDE0:[^:]*]]: i64,
   // CHECK-SAME: %[[BASE_STRIDE1:[^:]*]]: i64,
@@ -16,27 +16,27 @@
   // CHECK-SAME: %[[ARG0:[^:]*]]: i64,
   // CHECK-SAME: %[[ARG1:[^:]*]]: i64,
   // CHECK-SAME: %[[ARG2:[^:]*]]: i64)
-  // CHECK-SAME: -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>
+  // CHECK-SAME: -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>
   // CHECK-DAG: %[[STRIDE0:.*]] = llvm.mlir.constant(4 : index) : i64
   // CHECK-DAG: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
   // CHECK-DAG: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
-  // CHECK-DAG: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK-DAG: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // Base address and algined address.
-  // CHECK-DAG: %[[DESC0:.*]] = llvm.insertvalue %[[BASE]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK-DAG: %[[DESC1:.*]] = llvm.insertvalue %[[BASE_ALIGNED]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK-DAG: %[[DESC0:.*]] = llvm.insertvalue %[[BASE]], %[[DESC]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK-DAG: %[[DESC1:.*]] = llvm.insertvalue %[[BASE_ALIGNED]], %[[DESC0]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // Offset.
-  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // Size 0.
-  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC2]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC2]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // Stride 0 == 4 * %arg0.
-  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC3]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC3]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // Size 1.
-  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC4]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC4]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // Stride 1 == 1 * %arg1.
-  // CHECK: %[[DESC6:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC5]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC6:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC5]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   %1 = memref.subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] : memref<64x4xf32, strided<[4, 1], offset: 0>>
Index: mlir/test/lib/Dialect/LLVM/TestLowerToLLVM.cpp
===================================================================
--- mlir/test/lib/Dialect/LLVM/TestLowerToLLVM.cpp
+++ mlir/test/lib/Dialect/LLVM/TestLowerToLLVM.cpp
@@ -49,6 +49,11 @@
   // unrealized casts, but there needs to be the final module-wise cleanup in
   // the end. Keep module-level for now.
 
+  auto enableOpaquePointers = [](auto options) {
+    options.useOpaquePointers = true;
+    return options;
+  };
+
   // Blanket-convert any remaining high-level vector ops to loops if any remain.
   pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
   // Blanket-convert any remaining linalg ops to loops if any remain.
@@ -65,7 +70,8 @@
   // Convert vector to LLVM (always needed).
   pm.addPass(createConvertVectorToLLVMPass(
       // TODO: add more options on a per-need basis.
-      ConvertVectorToLLVMPassOptions{options.reassociateFPReductions}));
+      enableOpaquePointers(
+          ConvertVectorToLLVMPassOptions{options.reassociateFPReductions})));
   // Convert Math to LLVM (always needed).
   pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
   // Expand complicated MemRef operations before lowering them.
@@ -73,9 +79,11 @@
   // The expansion may create affine expressions. Get rid of them.
   pm.addPass(createLowerAffinePass());
   // Convert MemRef to LLVM (always needed).
-  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
+  pm.addPass(createFinalizeMemRefToLLVMConversionPass(
+      enableOpaquePointers(FinalizeMemRefToLLVMConversionPassOptions{})));
   // Convert Func to LLVM (always needed).
-  pm.addPass(createConvertFuncToLLVMPass());
+  pm.addPass(createConvertFuncToLLVMPass(
+      enableOpaquePointers(ConvertFuncToLLVMPassOptions{})));
   // Convert Index to LLVM (always needed).
   pm.addPass(createConvertIndexToLLVMPass());
   // Convert remaining unrealized_casts (always needed).