diff --git a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h --- a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h +++ b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h @@ -341,49 +341,54 @@ // Currently exposed C API. //////////////////////////////////////////////////////////////////////////////// extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_shape_i8(UnrankedMemRefType *m); +_mlir_ciface_printMemrefShapeI8(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_shape_i32(UnrankedMemRefType *m); +_mlir_ciface_printMemrefShapeI32(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_shape_i64(UnrankedMemRefType *m); +_mlir_ciface_printMemrefShapeI64(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_shape_f32(UnrankedMemRefType *m); +_mlir_ciface_printMemrefShapeF32(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_shape_f64(UnrankedMemRefType *m); +_mlir_ciface_printMemrefShapeF64(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_i8(UnrankedMemRefType *m); +_mlir_ciface_printMemrefI8(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_i32(UnrankedMemRefType *m); +_mlir_ciface_printMemrefI32(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_f32(UnrankedMemRefType *m); +_mlir_ciface_printMemrefI64(UnrankedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_f64(UnrankedMemRefType *m); +_mlir_ciface_printMemrefF32(UnrankedMemRefType *m); +extern "C" MLIR_RUNNERUTILS_EXPORT void +_mlir_ciface_printMemrefF64(UnrankedMemRefType *m); + +extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_nanoTime(); extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefI32(int64_t rank, void *ptr); extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefI64(int64_t rank, void *ptr); extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefF32(int64_t rank, void *ptr); extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefF64(int64_t rank, void *ptr); +extern "C" MLIR_RUNNERUTILS_EXPORT void printCString(char *str); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_0d_f32(StridedMemRefType *m); +_mlir_ciface_printMemref0dF32(StridedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_1d_f32(StridedMemRefType *m); +_mlir_ciface_printMemref1dF32(StridedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_2d_f32(StridedMemRefType *m); +_mlir_ciface_printMemref2dF32(StridedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_3d_f32(StridedMemRefType *m); +_mlir_ciface_printMemref3dF32(StridedMemRefType *m); extern "C" MLIR_RUNNERUTILS_EXPORT void -_mlir_ciface_print_memref_4d_f32(StridedMemRefType *m); +_mlir_ciface_printMemref4dF32(StridedMemRefType *m); -extern "C" MLIR_RUNNERUTILS_EXPORT void _mlir_ciface_print_memref_vector_4x4xf32( +extern "C" MLIR_RUNNERUTILS_EXPORT void _mlir_ciface_printMemrefVector4x4xf32( StridedMemRefType, 2> *m); -extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verify_memref_i32( +extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefI32( UnrankedMemRefType *actual, UnrankedMemRefType *expected); -extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verify_memref_f32( +extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefF32( UnrankedMemRefType *actual, UnrankedMemRefType *expected);
-extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verify_memref_f64( +extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefF64( UnrankedMemRefType *actual, UnrankedMemRefType *expected); extern "C" MLIR_RUNNERUTILS_EXPORT int64_t verifyMemRefI32(int64_t rank, diff --git a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp --- a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp +++ b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp @@ -45,9 +45,8 @@ extern "C" void printComma() { fputs(", ", stdout); } extern "C" void printNewline() { fputc('\n', stdout); } -extern "C" MLIR_CRUNNERUTILS_EXPORT void -memrefCopy(int64_t elemSize, UnrankedMemRefType *srcArg, - UnrankedMemRefType *dstArg) { +extern "C" void memrefCopy(int64_t elemSize, UnrankedMemRefType *srcArg, + UnrankedMemRefType *dstArg) { DynamicMemRefType src(*srcArg); DynamicMemRefType dst(*dstArg); @@ -104,7 +103,7 @@ } /// Prints GFLOPS rating. -extern "C" void print_flops(double flops) { +extern "C" void printFlops(double flops) { fprintf(stderr, "%lf GFLOPS\n", flops / 1.0E9); } diff --git a/mlir/lib/ExecutionEngine/RunnerUtils.cpp b/mlir/lib/ExecutionEngine/RunnerUtils.cpp --- a/mlir/lib/ExecutionEngine/RunnerUtils.cpp +++ b/mlir/lib/ExecutionEngine/RunnerUtils.cpp @@ -18,67 +18,65 @@ // NOLINTBEGIN(*-identifier-naming) -extern "C" void -_mlir_ciface_print_memref_shape_i8(UnrankedMemRefType *M) { +extern "C" void _mlir_ciface_printMemrefShapeI8(UnrankedMemRefType *M) { std::cout << "Unranked Memref "; printMemRefMetaData(std::cout, DynamicMemRefType(*M)); std::cout << "\n"; } extern "C" void -_mlir_ciface_print_memref_shape_i32(UnrankedMemRefType *M) { +_mlir_ciface_printMemrefShapeI32(UnrankedMemRefType *M) { std::cout << "Unranked Memref "; printMemRefMetaData(std::cout, DynamicMemRefType(*M)); std::cout << "\n"; } extern "C" void -_mlir_ciface_print_memref_shape_i64(UnrankedMemRefType *M) { +_mlir_ciface_printMemrefShapeI64(UnrankedMemRefType *M) { std::cout << "Unranked Memref "; printMemRefMetaData(std::cout, DynamicMemRefType(*M)); std::cout << "\n"; } -extern "C" void -_mlir_ciface_print_memref_shape_f32(UnrankedMemRefType *M) { +extern "C" void _mlir_ciface_printMemrefShapeF32(UnrankedMemRefType *M) { std::cout << "Unranked Memref "; printMemRefMetaData(std::cout, DynamicMemRefType(*M)); std::cout << "\n"; } extern "C" void -_mlir_ciface_print_memref_shape_f64(UnrankedMemRefType *M) { +_mlir_ciface_printMemrefShapeF64(UnrankedMemRefType *M) { std::cout << "Unranked Memref "; printMemRefMetaData(std::cout, DynamicMemRefType(*M)); std::cout << "\n"; } -extern "C" void _mlir_ciface_print_memref_vector_4x4xf32( +extern "C" void _mlir_ciface_printMemrefVector4x4xf32( StridedMemRefType, 2> *M) { impl::printMemRef(*M); } -extern "C" void _mlir_ciface_print_memref_i8(UnrankedMemRefType *M) { +extern "C" void _mlir_ciface_printMemrefI8(UnrankedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void _mlir_ciface_print_memref_i32(UnrankedMemRefType *M) { +extern "C" void _mlir_ciface_printMemrefI32(UnrankedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void _mlir_ciface_print_memref_i64(UnrankedMemRefType *M) { +extern "C" void _mlir_ciface_printMemrefI64(UnrankedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void _mlir_ciface_print_memref_f32(UnrankedMemRefType *M) { +extern "C" void _mlir_ciface_printMemrefF32(UnrankedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void _mlir_ciface_print_memref_f64(UnrankedMemRefType *M) { +extern "C" void
_mlir_ciface_printMemrefF64(UnrankedMemRefType *M) { impl::printMemRef(*M); } -extern "C" int64_t _mlir_ciface_nano_time() { +extern "C" int64_t _mlir_ciface_nanoTime() { auto now = std::chrono::high_resolution_clock::now(); auto duration = now.time_since_epoch(); auto nanoseconds = @@ -86,46 +84,41 @@ return nanoseconds.count(); } -extern "C" void print_memref_i32(int64_t rank, void *ptr) { +extern "C" void printMemrefI32(int64_t rank, void *ptr) { UnrankedMemRefType descriptor = {rank, ptr}; - _mlir_ciface_print_memref_i32(&descriptor); + _mlir_ciface_printMemrefI32(&descriptor); } -extern "C" void print_memref_i64(int64_t rank, void *ptr) { +extern "C" void printMemrefI64(int64_t rank, void *ptr) { UnrankedMemRefType descriptor = {rank, ptr}; - _mlir_ciface_print_memref_i64(&descriptor); + _mlir_ciface_printMemrefI64(&descriptor); } -extern "C" void print_memref_f32(int64_t rank, void *ptr) { +extern "C" void printMemrefF32(int64_t rank, void *ptr) { UnrankedMemRefType descriptor = {rank, ptr}; - _mlir_ciface_print_memref_f32(&descriptor); + _mlir_ciface_printMemrefF32(&descriptor); } -extern "C" void print_memref_f64(int64_t rank, void *ptr) { +extern "C" void printMemrefF64(int64_t rank, void *ptr) { UnrankedMemRefType descriptor = {rank, ptr}; - _mlir_ciface_print_memref_f64(&descriptor); + _mlir_ciface_printMemrefF64(&descriptor); } -extern "C" void print_c_string(char *str) { printf("%s", str); } +extern "C" void printCString(char *str) { printf("%s", str); } -extern "C" void -_mlir_ciface_print_memref_0d_f32(StridedMemRefType *M) { +extern "C" void _mlir_ciface_printMemref0dF32(StridedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void -_mlir_ciface_print_memref_1d_f32(StridedMemRefType *M) { +extern "C" void _mlir_ciface_printMemref1dF32(StridedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void -_mlir_ciface_print_memref_2d_f32(StridedMemRefType *M) { +extern "C" void _mlir_ciface_printMemref2dF32(StridedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void -_mlir_ciface_print_memref_3d_f32(StridedMemRefType *M) { +extern "C" void _mlir_ciface_printMemref3dF32(StridedMemRefType *M) { impl::printMemRef(*M); } -extern "C" void -_mlir_ciface_print_memref_4d_f32(StridedMemRefType *M) { +extern "C" void _mlir_ciface_printMemref4dF32(StridedMemRefType *M) { impl::printMemRef(*M); } diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir @@ -423,8 +423,8 @@ // CHECK-NEXT: %[[dC:.*]] = memref.cast %[[C]] : memref to memref<*xf32> %res2 = tensor.cast %res: tensor to tensor<*xf32> - // CHECK-NEXT: call @print_memref_f32(%[[dC]]) : (memref<*xf32>) -> () - call @print_memref_f32(%res2) : (tensor<*xf32>) -> () + // CHECK-NEXT: call @printMemrefF32(%[[dC]]) : (memref<*xf32>) -> () + call @printMemrefF32(%res2) : (tensor<*xf32>) -> () // CHECK-DAG: memref.dealloc %[[A]] : memref<64xf32> // CHECK-DAG: memref.dealloc %[[B]] : memref<64xf32> @@ -433,8 +433,8 @@ return } -// CHECK: func private @print_memref_f32(memref<*xf32>) -func.func private @print_memref_f32(tensor<*xf32>) +// CHECK: func private @printMemrefF32(memref<*xf32>) +func.func private @printMemrefF32(tensor<*xf32>) // ----- diff --git a/mlir/test/Dialect/Linalg/library-calls.mlir b/mlir/test/Dialect/Linalg/library-calls.mlir --- 
a/mlir/test/Dialect/Linalg/library-calls.mlir +++ b/mlir/test/Dialect/Linalg/library-calls.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -convert-linalg-to-std | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) // CHECK: func private @linalg_fill_f32_viewsxsxf32(f32, memref) attributes {llvm.emit_c_interface} // CHECK: func private @linalg_matmul_viewsxsxf32_viewsxsxf32_viewsxsxf32(memref, memref, memref) attributes {llvm.emit_c_interface} diff --git a/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir b/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/microbench-linalg-async-parallel-for.mlir @@ -80,7 +80,7 @@ // CHECK: [2, 2, 2, 2, 2, 2, 2, 2, 2, 2] %U = memref.cast %DST10 : memref<1x10xf32> to memref<*xf32> - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () memref.dealloc %LHS10: memref<1x10xf32> memref.dealloc %RHS10: memref<1x10xf32> @@ -130,5 +130,5 @@ func.func private @rtclock() -> f64 -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir b/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/microbench-scf-async-parallel-for.mlir @@ -102,7 +102,7 @@ // CHECK: [2, 2, 2, 2, 2, 2, 2, 2, 2, 2] %U = memref.cast %DST10 : memref<1x10xf32> to memref<*xf32> - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () memref.dealloc %LHS10: memref<1x10xf32> memref.dealloc %RHS10: memref<1x10xf32> @@ -152,5 +152,5 @@ func.func private @rtclock() -> f64 -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-1d.mlir @@ -83,7 +83,7 @@ memref.store %3, %A[%i] : memref<9xf32> } // CHECK: [0, 1, 2, 3, 4, 5, 6, 7, 8] - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () scf.parallel (%i) = (%lb) to (%ub) step (%c1) { memref.store %c0, %A[%i] : memref<9xf32> @@ -98,7 +98,7 @@ memref.store %3, %A[%i] : memref<9xf32> } // CHECK: [0, 0, 2, 0, 4, 0, 6, 0, 8] - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () scf.parallel (%i) = (%lb) to (%ub) step (%c1) { memref.store %c0, %A[%i] : memref<9xf32> @@ -117,7 +117,7 @@ memref.store %5, %A[%3] : memref<9xf32> } // CHECK: [-20, 0, 0, -17, 0, 0, -14, 0, 0] - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () // 4. Check that loop with zero iterations doesn't crash at runtime. 
%lb1 = call @zero(): () -> (index) @@ -132,4 +132,4 @@ return } -func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface } +func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir --- a/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir +++ b/mlir/test/Integration/Dialect/Async/CPU/test-async-parallel-for-2d.mlir @@ -84,7 +84,7 @@ // CHECK-NEXT: [40, 41, 42, 43, 44, 45, 46, 47] // CHECK-NEXT: [48, 49, 50, 51, 52, 53, 54, 55] // CHECK-NEXT: [56, 57, 58, 59, 60, 61, 62, 63] - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () scf.parallel (%i, %j) = (%lb, %lb) to (%ub, %ub) step (%c1, %c1) { memref.store %c0, %A[%i, %j] : memref<8x8xf32> @@ -109,7 +109,7 @@ // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 0] // CHECK-NEXT: [48, 49, 50, 51, 52, 53, 54, 55] // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 0] - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () scf.parallel (%i, %j) = (%lb, %lb) to (%ub, %ub) step (%c1, %c1) { memref.store %c0, %A[%i, %j] : memref<8x8xf32> @@ -134,11 +134,11 @@ // CHECK-NEXT: [40, 0, 42, 0, 44, 0, 46, 0] // CHECK-NEXT: [48, 0, 50, 0, 52, 0, 54, 0] // CHECK-NEXT: [56, 0, 58, 0, 60, 0, 62, 0] - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () memref.dealloc %A : memref<8x8xf32> return } -func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface } +func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/matmul-vs-matvec.mlir @@ -3,7 +3,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) func.func @matmul(%A: memref, %B: memref) -> (memref) { %c0 = arith.constant 0 : index @@ -60,7 +60,7 @@ } } %C2_ = memref.cast %C2 : memref to memref<*xf32> - call @print_memref_f32(%C2_) : (memref<*xf32>) -> () + call @printMemrefF32(%C2_) : (memref<*xf32>) -> () memref.dealloc %C1 : memref memref.dealloc %C2 : memref return diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/rank-reducing-subview.mlir @@ -3,7 +3,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) func.func @main() { %c0 = arith.constant 0 : index @@ -21,11 +21,11 @@ %B = memref.subview %A[%c1, 0][1, %c2][1, 1] : memref to memref %C = memref.subview %A[0, %c1][%c2, 1][1, 1] : memref to memref %A_ = memref.cast %A : memref to memref<*xf32> - call @print_memref_f32(%A_) : (memref<*xf32>) -> () + call @printMemrefF32(%A_) : (memref<*xf32>) -> () %B_ = memref.cast %B : memref to memref<*xf32> - call @print_memref_f32(%B_) : 
(memref<*xf32>) -> () + call @printMemrefF32(%B_) : (memref<*xf32>) -> () %C_ = memref.cast %C : memref to memref<*xf32> - call @print_memref_f32(%C_) : (memref<*xf32>) -> () + call @printMemrefF32(%C_) : (memref<*xf32>) -> () memref.dealloc %A : memref return } diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir @@ -12,7 +12,7 @@ %dynamic = tensor.cast %const: tensor<2x2x3x2xf32> to tensor<2x?x?x?xf32> %collapsed = call @collapse_dynamic_shape(%dynamic) : (tensor<2x?x?x?xf32>) -> (tensor<2x?x?xf32>) %unranked = tensor.cast %collapsed: tensor<2x?x?xf32> to tensor<*xf32> - call @print_memref_f32(%unranked) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 3 offset = 0 sizes = [2, 6, 2] strides = [12, 2, 1] data = // CHECK-NEXT{LITERAL}: [[[-3.9058, 0.9072], @@ -30,7 +30,7 @@ return } -func.func private @print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) func.func @collapse_dynamic_shape(%arg0 : tensor<2x?x?x?xf32>) -> tensor<2x?x?xf32> { %0 = tensor.collapse_shape %arg0 [[0], [1, 2], [3]]: tensor<2x?x?x?xf32> into tensor<2x?x?xf32> diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir @@ -9,7 +9,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) // Creates and returns a 1-D buffer of size %s1 filled with the value %f func.func @alloc_1d_filled_f32(%s1 : index, %f : f32) -> memref { @@ -39,7 +39,7 @@ memref.store %f10, %in1D[%c3] : memref call @conv_1d(%in1D, %filter1D, %out1D) : (memref, memref, memref) -> () %out1D_ = memref.cast %out1D : memref to memref<*xf32> - call @print_memref_f32(%out1D_): (memref<*xf32>) -> () + call @printMemrefF32(%out1D_): (memref<*xf32>) -> () memref.dealloc %filter1D : memref memref.dealloc %in1D : memref diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir @@ -9,7 +9,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) // Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f func.func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> memref { @@ -43,7 +43,7 @@ memref.store %f10, %in1D_nwc[%c0, %c3, %c0] : memref call @conv_1d_nwc_wcf(%in1D_nwc, %filter1D_nwc, %out1D_nwc) : (memref, memref, memref) -> () %out1D_nwc_ = memref.cast %out1D_nwc : memref to memref<*xf32> - call @print_memref_f32(%out1D_nwc_): (memref<*xf32>) -> () + call @printMemrefF32(%out1D_nwc_): (memref<*xf32>) -> () memref.dealloc %filter1D_nwc : memref memref.dealloc 
%in1D_nwc : memref diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir @@ -9,7 +9,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) // Creates and returns a 2-D buffer of size (%s1, %s2) filled with the value %f func.func @alloc_2d_filled_f32(%s1 : index, %s2 : index, %f : f32) -> memref { @@ -41,7 +41,7 @@ memref.store %f10, %in2D[%c0, %c3] : memref call @conv_2d(%in2D, %filter2D, %out2D) : (memref, memref, memref) -> () %out2D_ = memref.cast %out2D : memref to memref<*xf32> - call @print_memref_f32(%out2D_): (memref<*xf32>) -> () + call @printMemrefF32(%out2D_): (memref<*xf32>) -> () memref.dealloc %filter2D : memref memref.dealloc %in2D : memref diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir @@ -9,7 +9,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) // Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f func.func @alloc_4d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %f : f32) -> memref { @@ -43,7 +43,7 @@ memref.store %f10, %in2D_nhwc[%c0, %c0, %c3, %c0] : memref call @conv_2d_nhwc_hwcf(%in2D_nhwc, %filter2D_nhwc, %out2D_nhwc) : (memref, memref, memref) -> () %out2D_nhwc_ = memref.cast %out2D_nhwc : memref to memref<*xf32> - call @print_memref_f32(%out2D_nhwc_): (memref<*xf32>) -> () + call @printMemrefF32(%out2D_nhwc_): (memref<*xf32>) -> () memref.dealloc %filter2D_nhwc : memref memref.dealloc %in2D_nhwc : memref diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir @@ -9,7 +9,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) // Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f func.func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> memref { @@ -41,7 +41,7 @@ memref.store %f10, %in3D[%c0, %c0, %c3] : memref call @conv_3d(%in3D, %filter3D, %out3D) : (memref, memref, memref) -> () %out3D_ = memref.cast %out3D : memref to memref<*xf32> - call @print_memref_f32(%out3D_): (memref<*xf32>) -> () + call @printMemrefF32(%out3D_): (memref<*xf32>) -> () memref.dealloc %filter3D : memref memref.dealloc %in3D : memref diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir +++ 
b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir @@ -9,7 +9,7 @@ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) // Creates and returns 5-D buffer of size (%s1, %s2, %s3, %s4, %s5) filled with the value %f func.func @alloc_5d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %s5 : index, %f : f32) -> memref { @@ -44,7 +44,7 @@ memref.store %f10, %in3D_ndhwc[%c0, %c0, %c0, %c3, %c0] : memref call @conv_3d_ndhwc_dhwcf(%in3D_ndhwc, %filter3D_ndhwc, %out3D_ndhwc) : (memref, memref, memref) -> () %out3D_ndhwc_ = memref.cast %out3D_ndhwc : memref to memref<*xf32> - call @print_memref_f32(%out3D_ndhwc_): (memref<*xf32>) -> () + call @printMemrefF32(%out3D_ndhwc_): (memref<*xf32>) -> () memref.dealloc %filter3D_ndhwc : memref memref.dealloc %in3D_ndhwc : memref diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir @@ -13,11 +13,11 @@ %addf = arith.addf %a, %b : tensor<3xf32> %addf_unranked = tensor.cast %addf : tensor<3xf32> to tensor<*xf32> - call @print_memref_f32(%addf_unranked) : (tensor<*xf32>) -> () + call @printMemrefF32(%addf_unranked) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{.*}} rank = 1 offset = 0 sizes = [3] strides = [1] data = // CHECK-NEXT: [11, 22, 33] return } -func.func private @print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir @@ -12,7 +12,7 @@ %dynamic = tensor.cast %const: tensor<2x6x2xf32> to tensor<2x?x?xf32> %expanded = call @expand_dynamic_shape(%dynamic) : (tensor<2x?x?xf32>) -> (tensor<2x2x?x1x?xf32>) %unranked = tensor.cast %expanded: tensor<2x2x?x1x?xf32> to tensor<*xf32> - call @print_memref_f32(%unranked) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 5 offset = 0 sizes = [2, 2, 3, 1, 2] strides = [12, 6, 2, 2, 1] data = @@ -31,7 +31,7 @@ return } -func.func private @print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) func.func @expand_dynamic_shape(%arg0 : tensor<2x?x?xf32>) -> tensor<2x2x?x1x?xf32> { %0 = tensor.expand_shape %arg0 [[0], [1, 2, 3], [4]]: tensor<2x?x?xf32> into tensor<2x2x?x1x?xf32> diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir @@ -30,7 +30,7 @@ } // %B = tensor.cast %3 : tensor to tensor<*xf32> - // call @print_memref_f32(%B) : (tensor<*xf32>) -> () + // call @printMemrefF32(%B) : (tensor<*xf32>) -> () %4 = affine.apply #map0(%c0, %c64)[%c2] %5 = linalg.init_tensor [%4, 2] : tensor @@ -47,10 +47,10 @@ } // %A = tensor.cast %6 : tensor to tensor<*xf32> - // call @print_memref_f32(%A) : 
(tensor<*xf32>) -> () + // call @printMemrefF32(%A) : (tensor<*xf32>) -> () // %C = tensor.cast %0 : tensor to tensor<*xf32> - // call @print_memref_f32(%C) : (tensor<*xf32>) -> () + // call @printMemrefF32(%C) : (tensor<*xf32>) -> () %7 = scf.for %arg3 = %c0 to %c64 step %c2 iter_args(%arg4 = %0) -> (tensor) { %8 = tensor.extract_slice %arg0[%arg3] [2] [1] : tensor<64xf32> to tensor<2xf32> @@ -64,11 +64,11 @@ %16 = linalg.dot ins(%13, %15 : tensor<2xf32>, tensor<2xf32>) outs(%arg4 : tensor) -> tensor // %AA = tensor.cast %13 : tensor<2xf32> to tensor<*xf32> - // call @print_memref_f32(%AA) : (tensor<*xf32>) -> () + // call @printMemrefF32(%AA) : (tensor<*xf32>) -> () // %BB = tensor.cast %15 : tensor<2xf32> to tensor<*xf32> - // call @print_memref_f32(%BB) : (tensor<*xf32>) -> () + // call @printMemrefF32(%BB) : (tensor<*xf32>) -> () // %CC = tensor.cast %16 : tensor to tensor<*xf32> - // call @print_memref_f32(%CC) : (tensor<*xf32>) -> () + // call @printMemrefF32(%CC) : (tensor<*xf32>) -> () scf.yield %16 : tensor } @@ -94,9 +94,9 @@ // CHECK: Unranked Memref base@ = {{.*}} rank = 0 offset = 0 sizes = [] strides = [] data = // CHECK-NEXT: [128] - call @print_memref_f32(%res2) : (tensor<*xf32>) -> () + call @printMemrefF32(%res2) : (tensor<*xf32>) -> () return } -func.func private @print_memref_f32(tensor<*xf32>) attributes { llvm.emit_c_interface } +func.func private @printMemrefF32(tensor<*xf32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir @@ -18,7 +18,7 @@ tensor.yield %cst : f32 } : tensor<1x?x3xf32> to tensor<1x?x?xf32> %unranked = tensor.cast %out: tensor<1x?x?xf32> to tensor<*xf32> - call @print_memref_f32(%unranked) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 3 offset = 0 sizes = [1, 4, 5] strides = [20, 5, 1] data = @@ -30,4 +30,4 @@ return } -func.func private @print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir @@ -18,14 +18,14 @@ %inserted_at_position_1 = tensor.insert_slice %insert_val into %const[1][1][1] : tensor<1xf32> into tensor<2xf32> %unranked_at_position_0 = tensor.cast %inserted_at_position_0 : tensor<2xf32> to tensor<*xf32> - call @print_memref_f32(%unranked_at_position_0) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked_at_position_0) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data = // CHECK-NEXT: [20, 10] %unranked_at_position_1 = tensor.cast %inserted_at_position_1 : tensor<2xf32> to tensor<*xf32> - call @print_memref_f32(%unranked_at_position_1) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked_at_position_1) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data = @@ -34,4 +34,4 @@ return } -func.func private 
@print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir @@ -12,7 +12,7 @@ %inserted = tensor.insert_slice %insert_val into %const[0][1][1] : tensor<1xf32> into tensor<2xf32> %unranked = tensor.cast %inserted : tensor<2xf32> to tensor<*xf32> - call @print_memref_f32(%unranked) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data = @@ -21,4 +21,4 @@ return } -func.func private @print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir @@ -14,13 +14,13 @@ %0 = call @foo() : () -> tensor<4xf32> // Instead of relying on tensor_store which introduces aliasing, we rely on - // the conversion of print_memref_f32(tensor<*xf32>) to - // print_memref_f32(memref<*xf32>). + // the conversion of printMemrefF32(tensor<*xf32>) to + // printMemrefF32(memref<*xf32>). // Note that this is skipping a step and we would need at least some function // attribute to declare that this conversion is valid (e.g. when we statically // know that things will play nicely at the C ABI boundary). %unranked = tensor.cast %0 : tensor<4xf32> to tensor<*xf32> - call @print_memref_f32(%unranked) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 1 offset = 0 sizes = [4] strides = [1] data = @@ -33,4 +33,4 @@ // Note that this is skipping a step and we would need at least some function // attribute to declare that this conversion is valid (e.g. when we statically // know that things will play nicely at the C ABI boundary). -func.func private @print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir @@ -26,7 +26,7 @@ outs(%C: tensor<2x4xf32>) -> tensor<2x4xf32> %unranked = tensor.cast %D : tensor<2x4xf32> to tensor<*xf32> - call @print_memref_f32(%unranked) : (tensor<*xf32>) -> () + call @printMemrefF32(%unranked) : (tensor<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 2 offset = 0 sizes = [2, 4] strides = [4, 1] data = @@ -36,4 +36,4 @@ return } -func.func private @print_memref_f32(%ptr : tensor<*xf32>) +func.func private @printMemrefF32(%ptr : tensor<*xf32>) diff --git a/mlir/test/Integration/Dialect/Standard/CPU/test_subview.mlir b/mlir/test/Integration/Dialect/Standard/CPU/test_subview.mlir --- a/mlir/test/Integration/Dialect/Standard/CPU/test_subview.mlir +++ b/mlir/test/Integration/Dialect/Standard/CPU/test_subview.mlir @@ -15,7 +15,7 @@ /// Subview with only leading operands. 
%1 = memref.subview %0[2, 0][3, 3][1, 1]: memref<5x3xf32> to memref<3x3xf32, offset: 6, strides: [3, 1]> %unranked = memref.cast %1 : memref<3x3xf32, offset: 6, strides: [3, 1]> to memref<*xf32> - call @print_memref_f32(%unranked) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked) : (memref<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 2 offset = 6 sizes = [3, 3] strides = [3, 1] data = @@ -28,7 +28,7 @@ /// Regular subview. %2 = memref.subview %0[0, 2][5, 1][1, 1]: memref<5x3xf32> to memref<5x1xf32, offset: 2, strides: [3, 1]> %unranked2 = memref.cast %2 : memref<5x1xf32, offset: 2, strides: [3, 1]> to memref<*xf32> - call @print_memref_f32(%unranked2) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked2) : (memref<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 2 offset = 2 sizes = [5, 1] strides = [3, 1] data = @@ -43,7 +43,7 @@ /// Rank-reducing subview. %3 = memref.subview %0[0, 2][5, 1][1, 1]: memref<5x3xf32> to memref<5xf32, offset: 2, strides: [3]> %unranked3 = memref.cast %3 : memref<5xf32, offset: 2, strides: [3]> to memref<*xf32> - call @print_memref_f32(%unranked3) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked3) : (memref<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 1 offset = 2 sizes = [5] strides = [3] data = @@ -52,7 +52,7 @@ /// Rank-reducing subview with only leading operands. %4 = memref.subview %0[1, 0][1, 3][1, 1]: memref<5x3xf32> to memref<3xf32, offset: 3, strides: [1]> %unranked4 = memref.cast %4 : memref<3xf32, offset: 3, strides: [1]> to memref<*xf32> - call @print_memref_f32(%unranked4) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked4) : (memref<*xf32>) -> () // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} // CHECK-SAME: rank = 1 offset = 3 sizes = [3] strides = [1] data = // CHECK-NEXT: [3, 4, 5] @@ -60,4 +60,4 @@ return } -func.func private @print_memref_f32(%ptr : memref<*xf32>) +func.func private @printMemrefF32(%ptr : memref<*xf32>) diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir @@ -11,7 +11,7 @@ #map0 = affine_map<(d0, d1) -> (d1, d0)> #map1 = affine_map<(d0, d1) -> (d1)> -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) func.func @alloc_2d_filled_f32(%arg0: index, %arg1: index) -> memref { %c0 = arith.constant 0 : index @@ -40,7 +40,7 @@ %cst = arith.constant -4.2e+01 : f32 %0 = call @alloc_2d_filled_f32(%c6, %c6) : (index, index) -> memref %converted = memref.cast %0 : memref to memref<*xf32> - call @print_memref_f32(%converted): (memref<*xf32>) -> () + call @printMemrefF32(%converted): (memref<*xf32>) -> () // CHECK: Unranked{{.*}}data = // CHECK: [ // CHECK-SAME: [0, 100, 200, 300, 400, 500], diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir @@ -11,7 +11,7 @@ // RUN: mlir-opt %s -pass-pipeline="func.func(test-vector-to-forloop)" | FileCheck %s -check-prefix=TRANSFORM -func.func private @print_memref_f32(memref<*xf32>) +func.func private 
@printMemrefF32(memref<*xf32>) func.func @alloc_1d_filled_inc_f32(%arg0: index, %arg1: f32) -> memref { %c0 = arith.constant 0 : index @@ -50,7 +50,7 @@ %acc = arith.addf %a, %b: vector<64xf32> vector.transfer_write %acc, %out[%c0]: vector<64xf32>, memref %converted = memref.cast %out : memref to memref<*xf32> - call @print_memref_f32(%converted): (memref<*xf32>) -> () + call @printMemrefF32(%converted): (memref<*xf32>) -> () // CHECK: Unranked{{.*}}data = // CHECK: [ // CHECK-SAME: 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir --- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir +++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir @@ -63,7 +63,7 @@ } // Print the memref after computation. - call @print_memref_f32(%3) : (memref<*xf32>) -> () + call @printMemrefF32(%3) : (memref<*xf32>) -> () // CHECK: [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16], // CHECK-NEXT: [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16], // CHECK-NEXT: [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16], @@ -83,4 +83,4 @@ return } -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir --- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir +++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir @@ -51,7 +51,7 @@ gpu.terminator } // Print the memref after computation. - call @print_memref_f32(%33) : (memref<*xf32>) -> () + call @printMemrefF32(%33) : (memref<*xf32>) -> () // CHECK: [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16], // CHECK-NEXT: [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16], // CHECK-NEXT: [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16], @@ -71,4 +71,4 @@ return } -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir --- a/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir +++ b/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir @@ -60,11 +60,11 @@ gpu.terminator } - call @print_memref_i32(%cast_sum) : (memref<*xi32>) -> () + call @printMemrefI32(%cast_sum) : (memref<*xi32>) -> () // CHECK: [0, 2] return } -func.func private @print_memref_i32(memref<*xi32>) +func.func private @printMemrefI32(memref<*xi32>) diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir --- a/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir +++ b/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir @@ -60,11 +60,11 @@ gpu.terminator } - call @print_memref_i32(%cast_sum) : (memref<*xi32>) -> () + call @printMemrefI32(%cast_sum) : (memref<*xi32>) -> () // CHECK: [16, 11] return } -func.func private @print_memref_i32(memref<*xi32>) +func.func private @printMemrefI32(memref<*xi32>) diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir --- a/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir +++ b/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir @@ -60,11 +60,11 @@ gpu.terminator } - call @print_memref_i32(%cast_sum) : (memref<*xi32>) -> () + call 
@printMemrefI32(%cast_sum) : (memref<*xi32>) -> () // CHECK: [0, 2] return } -func.func private @print_memref_i32(memref<*xi32>) +func.func private @printMemrefI32(memref<*xi32>) diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir --- a/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir +++ b/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir @@ -32,8 +32,8 @@ memref.store %sum, %dst[%tz, %ty, %tx] : memref gpu.terminator } - call @print_memref_f32(%cast_dst) : (memref<*xf32>) -> () + call @printMemrefF32(%cast_dst) : (memref<*xf32>) -> () return } -func.func private @print_memref_f32(%ptr : memref<*xf32>) +func.func private @printMemrefF32(%ptr : memref<*xf32>) diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir --- a/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir +++ b/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir @@ -60,11 +60,11 @@ gpu.terminator } - call @print_memref_i32(%cast_sum) : (memref<*xi32>) -> () + call @printMemrefI32(%cast_sum) : (memref<*xi32>) -> () // CHECK: [31, 15] return } -func.func private @print_memref_i32(memref<*xi32>) +func.func private @printMemrefI32(memref<*xi32>) diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir --- a/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir +++ b/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir @@ -29,8 +29,8 @@ memref.store %res, %dst[%tx] : memref gpu.terminator } - call @print_memref_f32(%cast_dst) : (memref<*xf32>) -> () + call @printMemrefF32(%cast_dst) : (memref<*xf32>) -> () return } -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir --- a/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir +++ b/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir @@ -60,11 +60,11 @@ gpu.terminator } - call @print_memref_i32(%cast_sum) : (memref<*xi32>) -> () + call @printMemrefI32(%cast_sum) : (memref<*xi32>) -> () // CHECK: [31, 1] return } -func.func private @print_memref_i32(memref<*xi32>) +func.func private @printMemrefI32(memref<*xi32>) diff --git a/mlir/test/Integration/GPU/CUDA/async.mlir b/mlir/test/Integration/GPU/CUDA/async.mlir --- a/mlir/test/Integration/GPU/CUDA/async.mlir +++ b/mlir/test/Integration/GPU/CUDA/async.mlir @@ -66,8 +66,8 @@ async.await %t3 : !async.token // CHECK: [84, 84] - call @print_memref_i32(%h0_unranked) : (memref<*xi32>) -> () + call @printMemrefI32(%h0_unranked) : (memref<*xi32>) -> () return } -func.func private @print_memref_i32(memref<*xi32>) +func.func private @printMemrefI32(memref<*xi32>) diff --git a/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir b/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir --- a/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir +++ b/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir @@ -31,13 +31,13 @@ %22 = memref.cast %arg0 : memref<5xf32> to memref %23 = memref.cast %22 : memref to memref<*xf32> gpu.host_register %23 : memref<*xf32> - call @print_memref_f32(%23) : (memref<*xf32>) -> () + call @printMemrefF32(%23) : (memref<*xf32>) -> () %24 = arith.constant 1.0 : f32 call @other_func(%24, %22) : (f32, memref) -> () - call @print_memref_f32(%23) : (memref<*xf32>) -> () + call @printMemrefF32(%23) : (memref<*xf32>) -> () %val1 = vector.transfer_read %arg0[%c0], %v0: memref<5xf32>, vector<2xf32> 
vector.print %val1: vector<2xf32> return } -func.func private @print_memref_f32(%ptr : memref<*xf32>) +func.func private @printMemrefF32(%ptr : memref<*xf32>) diff --git a/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir b/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir --- a/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir +++ b/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir @@ -65,13 +65,13 @@ gpu.terminator } - call @print_memref_f32(%cast_sum) : (memref<*xf32>) -> () + call @printMemrefF32(%cast_sum) : (memref<*xf32>) -> () // CHECK: [31, 39] - call @print_memref_f32(%cast_mul) : (memref<*xf32>) -> () + call @printMemrefF32(%cast_mul) : (memref<*xf32>) -> () // CHECK: [0, 27720] return } -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) diff --git a/mlir/test/Integration/GPU/CUDA/shuffle.mlir b/mlir/test/Integration/GPU/CUDA/shuffle.mlir --- a/mlir/test/Integration/GPU/CUDA/shuffle.mlir +++ b/mlir/test/Integration/GPU/CUDA/shuffle.mlir @@ -32,8 +32,8 @@ memref.store %value, %dst[%tx] : memref gpu.terminator } - call @print_memref_f32(%cast_dst) : (memref<*xf32>) -> () + call @printMemrefF32(%cast_dst) : (memref<*xf32>) -> () return } -func.func private @print_memref_f32(%ptr : memref<*xf32>) +func.func private @printMemrefF32(%ptr : memref<*xf32>) diff --git a/mlir/test/Integration/GPU/CUDA/two-modules.mlir b/mlir/test/Integration/GPU/CUDA/two-modules.mlir --- a/mlir/test/Integration/GPU/CUDA/two-modules.mlir +++ b/mlir/test/Integration/GPU/CUDA/two-modules.mlir @@ -29,8 +29,8 @@ memref.store %t0, %dst[%tx] : memref gpu.terminator } - call @print_memref_i32(%cast_dst) : (memref<*xi32>) -> () + call @printMemrefI32(%cast_dst) : (memref<*xi32>) -> () return } -func.func private @print_memref_i32(%memref : memref<*xi32>) +func.func private @printMemrefI32(%memref : memref<*xi32>) diff --git a/mlir/test/Integration/GPU/ROCM/gpu-to-hsaco.mlir b/mlir/test/Integration/GPU/ROCM/gpu-to-hsaco.mlir --- a/mlir/test/Integration/GPU/ROCM/gpu-to-hsaco.mlir +++ b/mlir/test/Integration/GPU/ROCM/gpu-to-hsaco.mlir @@ -28,13 +28,13 @@ %cast = memref.cast %22 : memref to memref<*xf32> gpu.host_register %cast : memref<*xf32> %23 = memref.cast %22 : memref to memref<*xf32> - call @print_memref_f32(%23) : (memref<*xf32>) -> () + call @printMemrefF32(%23) : (memref<*xf32>) -> () %24 = arith.constant 1.0 : f32 %25 = call @mgpuMemGetDeviceMemRef1dFloat(%22) : (memref) -> (memref) call @other_func(%24, %25) : (f32, memref) -> () - call @print_memref_f32(%23) : (memref<*xf32>) -> () + call @printMemrefF32(%23) : (memref<*xf32>) -> () return } func.func private @mgpuMemGetDeviceMemRef1dFloat(%ptr : memref) -> (memref) -func.func private @print_memref_f32(%ptr : memref<*xf32>) +func.func private @printMemrefF32(%ptr : memref<*xf32>) diff --git a/mlir/test/Integration/GPU/ROCM/two-modules.mlir b/mlir/test/Integration/GPU/ROCM/two-modules.mlir --- a/mlir/test/Integration/GPU/ROCM/two-modules.mlir +++ b/mlir/test/Integration/GPU/ROCM/two-modules.mlir @@ -30,9 +30,9 @@ memref.store %t0, %dst_device[%tx] : memref gpu.terminator } - call @print_memref_i32(%cast_dst) : (memref<*xi32>) -> () + call @printMemrefI32(%cast_dst) : (memref<*xi32>) -> () return } func.func private @mgpuMemGetDeviceMemRef1dInt32(%ptr : memref) -> (memref) -func.func private @print_memref_i32(%ptr : memref<*xi32>) +func.func private @printMemrefI32(%ptr : memref<*xi32>) diff --git a/mlir/test/Integration/GPU/ROCM/vecadd.mlir 
b/mlir/test/Integration/GPU/ROCM/vecadd.mlir --- a/mlir/test/Integration/GPU/ROCM/vecadd.mlir +++ b/mlir/test/Integration/GPU/ROCM/vecadd.mlir @@ -51,9 +51,9 @@ %11 = call @mgpuMemGetDeviceMemRef1dFloat(%5) : (memref) -> (memref) call @vecadd(%9, %10, %11) : (memref, memref, memref) -> () - call @print_memref_f32(%8) : (memref<*xf32>) -> () + call @printMemrefF32(%8) : (memref<*xf32>) -> () return } func.func private @mgpuMemGetDeviceMemRef1dFloat(%ptr : memref) -> (memref) -func.func private @print_memref_f32(%ptr : memref<*xf32>) +func.func private @printMemrefF32(%ptr : memref<*xf32>) diff --git a/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir b/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir --- a/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir +++ b/mlir/test/Integration/GPU/ROCM/vector-transferops.mlir @@ -81,13 +81,13 @@ // CHECK: [1.23, 2.46, 2.46, 1.23] call @vectransferx2(%24, %26) : (memref, memref) -> () - call @print_memref_f32(%cast1) : (memref<*xf32>) -> () + call @printMemrefF32(%cast1) : (memref<*xf32>) -> () // CHECK: [2.46, 2.46, 2.46, 2.46] call @vectransferx4(%24, %26) : (memref, memref) -> () - call @print_memref_f32(%cast1) : (memref<*xf32>) -> () + call @printMemrefF32(%cast1) : (memref<*xf32>) -> () return } func.func private @mgpuMemGetDeviceMemRef1dFloat(%ptr : memref) -> (memref) -func.func private @print_memref_f32(%ptr : memref<*xf32>) +func.func private @printMemrefF32(%ptr : memref<*xf32>) diff --git a/mlir/test/mlir-cpu-runner/async-value.mlir b/mlir/test/mlir-cpu-runner/async-value.mlir --- a/mlir/test/mlir-cpu-runner/async-value.mlir +++ b/mlir/test/mlir-cpu-runner/async-value.mlir @@ -51,7 +51,7 @@ // CHECK: Unranked Memref // CHECK-SAME: rank = 0 offset = 0 sizes = [] strides = [] // CHECK-NEXT: [0.25] - call @print_memref_f32(%7): (memref<*xf32>) -> () + call @printMemrefF32(%7): (memref<*xf32>) -> () // ------------------------------------------------------------------------ // // Memref passed as async.execute operand. @@ -67,12 +67,12 @@ // CHECK: Unranked Memref // CHECK-SAME: rank = 0 offset = 0 sizes = [] strides = [] // CHECK-NEXT: [0.5] - call @print_memref_f32(%7): (memref<*xf32>) -> () + call @printMemrefF32(%7): (memref<*xf32>) -> () memref.dealloc %6 : memref return } -func.func private @print_memref_f32(memref<*xf32>) +func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/mlir-cpu-runner/async.mlir b/mlir/test/mlir-cpu-runner/async.mlir --- a/mlir/test/mlir-cpu-runner/async.mlir +++ b/mlir/test/mlir-cpu-runner/async.mlir @@ -23,20 +23,20 @@ // CHECK: [0, 0, 0, 0] %U = memref.cast %A : memref<4xf32> to memref<*xf32> - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () // CHECK: Current thread id: [[MAIN:.*]] // CHECK: [1, 0, 0, 0] memref.store %c1, %A[%i0]: memref<4xf32> call @mlirAsyncRuntimePrintCurrentThreadId(): () -> () - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () %outer = async.execute { // CHECK: Current thread id: [[THREAD0:.*]] // CHECK: [1, 2, 0, 0] memref.store %c2, %A[%i1]: memref<4xf32> func.call @mlirAsyncRuntimePrintCurrentThreadId(): () -> () - func.call @print_memref_f32(%U): (memref<*xf32>) -> () + func.call @printMemrefF32(%U): (memref<*xf32>) -> () // No op async region to create a token for testing async dependency. 
%noop = async.execute { @@ -50,7 +50,7 @@ // CHECK: [1, 2, 3, 0] memref.store %c3, %A[%i2]: memref<4xf32> func.call @mlirAsyncRuntimePrintCurrentThreadId(): () -> () - func.call @print_memref_f32(%U): (memref<*xf32>) -> () + func.call @printMemrefF32(%U): (memref<*xf32>) -> () async.yield } @@ -60,7 +60,7 @@ // CHECK: [1, 2, 3, 4] memref.store %c4, %A[%i3]: memref<4xf32> func.call @mlirAsyncRuntimePrintCurrentThreadId(): () -> () - func.call @print_memref_f32(%U): (memref<*xf32>) -> () + func.call @printMemrefF32(%U): (memref<*xf32>) -> () async.yield } @@ -69,7 +69,7 @@ // CHECK: Current thread id: [[MAIN]] // CHECK: [1, 2, 3, 4] call @mlirAsyncRuntimePrintCurrentThreadId(): () -> () - call @print_memref_f32(%U): (memref<*xf32>) -> () + call @printMemrefF32(%U): (memref<*xf32>) -> () memref.dealloc %A : memref<4xf32> @@ -78,4 +78,4 @@ func.func private @mlirAsyncRuntimePrintCurrentThreadId() -> () -func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface } +func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/mlir-cpu-runner/copy.mlir b/mlir/test/mlir-cpu-runner/copy.mlir --- a/mlir/test/mlir-cpu-runner/copy.mlir +++ b/mlir/test/mlir-cpu-runner/copy.mlir @@ -3,7 +3,7 @@ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \ // RUN: | FileCheck %s -func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface } +func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface } func.func @main() -> () { %c0 = arith.constant 0 : index @@ -22,7 +22,7 @@ memref.store %val_f32, %input[%i, %j] : memref<2x3xf32> } %unranked_input = memref.cast %input : memref<2x3xf32> to memref<*xf32> - call @print_memref_f32(%unranked_input) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked_input) : (memref<*xf32>) -> () // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1] // CHECK-NEXT: [0, 1, 2] // CHECK-NEXT: [3, 4, 5] @@ -30,7 +30,7 @@ %copy = memref.alloc() : memref<2x3xf32> memref.copy %input, %copy : memref<2x3xf32> to memref<2x3xf32> %unranked_copy = memref.cast %copy : memref<2x3xf32> to memref<*xf32> - call @print_memref_f32(%unranked_copy) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked_copy) : (memref<*xf32>) -> () // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1] // CHECK-NEXT: [0, 1, 2] // CHECK-NEXT: [3, 4, 5] @@ -40,7 +40,7 @@ : memref<3x2xf32> to memref<2x3xf32, offset: 0, strides: [1, 2]> memref.copy %input, %copy_two_casted : memref<2x3xf32> to memref<2x3xf32, offset: 0, strides: [1, 2]> %unranked_copy_two = memref.cast %copy_two : memref<3x2xf32> to memref<*xf32> - call @print_memref_f32(%unranked_copy_two) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked_copy_two) : (memref<*xf32>) -> () // CHECK: rank = 2 offset = 0 sizes = [3, 2] strides = [2, 1] // CHECK-NEXT: [0, 3] // CHECK-NEXT: [1, 4] @@ -62,7 +62,7 @@ %scalar_copy = memref.alloc() : memref memref.copy %scalar, %scalar_copy : memref to memref %unranked_scalar_copy = memref.cast %scalar_copy : memref to memref<*xf32> - call @print_memref_f32(%unranked_scalar_copy) : (memref<*xf32>) -> () + call @printMemrefF32(%unranked_scalar_copy) : (memref<*xf32>) -> () // CHECK: rank = 0 offset = 0 sizes = [] strides = [] // CHECK-NEXT [42] diff --git a/mlir/test/mlir-cpu-runner/global-memref.mlir b/mlir/test/mlir-cpu-runner/global-memref.mlir --- a/mlir/test/mlir-cpu-runner/global-memref.mlir +++ 
b/mlir/test/mlir-cpu-runner/global-memref.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
-func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
-func.func private @print_memref_i32(memref<*xi32>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefI32(memref<*xi32>) attributes { llvm.emit_c_interface }
func.func private @printNewline() -> ()
memref.global "private" @gv0 : memref<4xf32> = dense<[0.0, 1.0, 2.0, 3.0]>
@@ -13,7 +13,7 @@
// CHECK: sizes = [4]
// CHECK: strides = [1]
// CHECK: [0, 1, 2, 3]
- call @print_memref_f32(%U) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%U) : (memref<*xf32>) -> ()
call @printNewline() : () -> ()
// Overwrite some of the elements.
@@ -28,7 +28,7 @@
// CHECK: sizes = [4]
// CHECK: strides = [1]
// CHECK: [4, 1, 5, 3]
- call @print_memref_f32(%U) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%U) : (memref<*xf32>) -> ()
call @printNewline() : () -> ()
return
}
@@ -44,7 +44,7 @@
// CHECK: [0, 1]
// CHECK: [2, 3]
// CHECK: [4, 5]
- call @print_memref_i32(%U) : (memref<*xi32>) -> ()
+ call @printMemrefI32(%U) : (memref<*xi32>) -> ()
call @printNewline() : () -> ()
return
}
@@ -61,7 +61,7 @@
// CHECK: [2, 3]
// CHECK: [4, 5]
// CHECK: [6, 7]
- call @print_memref_f32(%U) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%U) : (memref<*xf32>) -> ()
call @printNewline() : () -> ()
// Overwrite the 1.0 (at index [0, 1]) with 10.0
@@ -77,7 +77,7 @@
// CHECK: [2, 3]
// CHECK: [4, 5]
// CHECK: [6, 7]
- call @print_memref_f32(%U) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%U) : (memref<*xf32>) -> ()
call @printNewline() : () -> ()
return
}
@@ -91,7 +91,7 @@
// CHECK: sizes = []
// CHECK: strides = []
// CHECK: [11]
- call @print_memref_i32(%U) : (memref<*xi32>) -> ()
+ call @printMemrefI32(%U) : (memref<*xi32>) -> ()
call @printNewline() : () -> ()
return
}
diff --git a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir
--- a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir
+++ b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir
@@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
// RUN: | FileCheck %s
-func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
func.func @main() -> () {
%c0 = arith.constant 0 : index
@@ -21,7 +21,7 @@
memref.store %val_f32, %input[%i, %j] : memref<2x3xf32>
}
%unranked_input = memref.cast %input : memref<2x3xf32> to memref<*xf32>
- call @print_memref_f32(%unranked_input) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_input) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
// CHECK-NEXT: [0, 1, 2]
// CHECK-NEXT: [3, 4, 5]
@@ -42,7 +42,7 @@
%unranked_output = memref.cast %output : memref<6x1xf32> to memref<*xf32>
- call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [6, 1] strides = [1, 1] data =
// CHECK-NEXT: [0],
// CHECK-NEXT: [1],
@@ -63,7 +63,7 @@
%unranked_output = memref.cast %output : memref to memref<*xf32>
- call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [1, 6] strides = [6, 1] data =
// CHECK-NEXT: [0, 1, 2, 3, 4, 5]
return
@@ -77,7 +77,7 @@
%unranked_output = memref.cast %output : memref<6x1xf32> to memref<*xf32>
- call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [6, 1] strides = [1, 1] data =
// CHECK-NEXT: [0],
// CHECK-NEXT: [1],
@@ -99,7 +99,7 @@
%unranked_output = memref.cast %output : memref to memref<*xf32>
- call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [1, 6] strides = [6, 1] data =
// CHECK-NEXT: [0, 1, 2, 3, 4, 5]
return
diff --git a/mlir/test/mlir-cpu-runner/memref-reshape.mlir b/mlir/test/mlir-cpu-runner/memref-reshape.mlir
--- a/mlir/test/mlir-cpu-runner/memref-reshape.mlir
+++ b/mlir/test/mlir-cpu-runner/memref-reshape.mlir
@@ -4,7 +4,7 @@
// RUN: | FileCheck %s
-func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
func.func @main() -> () {
%c0 = arith.constant 0 : index
@@ -22,7 +22,7 @@
memref.store %val_f32, %input[%i, %j] : memref<2x3xf32>
}
%unranked_input = memref.cast %input : memref<2x3xf32> to memref<*xf32>
- call @print_memref_f32(%unranked_input) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_input) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
// CHECK-NEXT: [0, 1, 2]
// CHECK-NEXT: [3, 4, 5]
@@ -54,7 +54,7 @@
: (memref<2x3xf32>, memref<2xindex>) -> memref
%unranked_output = memref.cast %output : memref to memref<*xf32>
- call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [3, 2] strides = [2, 1] data =
// CHECK: [0, 1],
// CHECK: [2, 3],
@@ -69,7 +69,7 @@
: (memref<2x3xf32>, memref<2xindex>) -> memref
%unranked_output = memref.cast %output : memref to memref<*xf32>
- call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%unranked_output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [3, 2] strides = [2, 1] data =
// CHECK: [0, 1],
// CHECK: [2, 3],
@@ -83,7 +83,7 @@
%output = memref.reshape %input(%dyn_size_shape)
: (memref<2x3xf32>, memref) -> memref<*xf32>
- call @print_memref_f32(%output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [3, 2] strides = [2, 1] data =
// CHECK: [0, 1],
// CHECK: [2, 3],
@@ -98,7 +98,7 @@
%output = memref.reshape %input(%dyn_size_shape)
: (memref<2x3xf32>, memref) -> memref<*xf32>
- call @print_memref_f32(%output) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%output) : (memref<*xf32>) -> ()
// CHECK: rank = 2 offset = 0 sizes = [3, 2] strides = [2, 1] data =
// CHECK: [0, 1],
// CHECK: [2, 3],
diff --git a/mlir/test/mlir-cpu-runner/print.mlir b/mlir/test/mlir-cpu-runner/print.mlir
--- a/mlir/test/mlir-cpu-runner/print.mlir
+++ b/mlir/test/mlir-cpu-runner/print.mlir
@@ -5,14 +5,14 @@
llvm.mlir.global internal constant @str_global("String to print\0A")
-llvm.func @print_c_string(!llvm.ptr)
+llvm.func @printCString(!llvm.ptr)
func.func @main() {
%0 = llvm.mlir.addressof @str_global : !llvm.ptr>
%1 = llvm.mlir.constant(0 : index) : i64
%2 = llvm.getelementptr %0[%1, %1] : (!llvm.ptr>, i64, i64) -> !llvm.ptr
- llvm.call @print_c_string(%2) : (!llvm.ptr) -> ()
+ llvm.call @printCString(%2) : (!llvm.ptr) -> ()
return
}
diff --git a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir
--- a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir
+++ b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir
@@ -40,7 +40,7 @@
%num_flops_i = arith.index_cast %num_flops : index to i16
%num_flops_f = arith.sitofp %num_flops_i : i16 to f64
%flops = arith.divf %num_flops_f, %t : f64
- call @print_flops(%flops) : (f64) -> ()
+ call @printFlops(%flops) : (f64) -> ()
memref.dealloc %A : memref<16x16xf32>
memref.dealloc %B : memref<16x16xf32>
@@ -72,5 +72,5 @@
return
}
-func.func private @print_flops(f64)
+func.func private @printFlops(f64)
func.func private @rtclock() -> f64
diff --git a/mlir/test/mlir-cpu-runner/unranked-memref.mlir b/mlir/test/mlir-cpu-runner/unranked-memref.mlir
--- a/mlir/test/mlir-cpu-runner/unranked-memref.mlir
+++ b/mlir/test/mlir-cpu-runner/unranked-memref.mlir
@@ -44,25 +44,25 @@
%V = memref.cast %A : memref<10x3xf32, 0> to memref
linalg.fill ins(%f10 : f32) outs(%V : memref)
%U = memref.cast %A : memref<10x3xf32, 0> to memref<*xf32>
- call @print_memref_f32(%U) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%U) : (memref<*xf32>) -> ()
%V2 = memref.cast %U : memref<*xf32> to memref
linalg.fill ins(%f5 : f32) outs(%V2 : memref)
%U2 = memref.cast %V2 : memref to memref<*xf32>
- call @print_memref_f32(%U2) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%U2) : (memref<*xf32>) -> ()
%V3 = memref.cast %V2 : memref to memref<*xf32>
%V4 = memref.cast %V3 : memref<*xf32> to memref
linalg.fill ins(%f2 : f32) outs(%V4 : memref)
%U3 = memref.cast %V2 : memref to memref<*xf32>
- call @print_memref_f32(%U3) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%U3) : (memref<*xf32>) -> ()
// 122 is ASCII for 'z'.
%i8_z = arith.constant 122 : i8
%I8 = memref.alloc() : memref
memref.store %i8_z, %I8[]: memref
%U4 = memref.cast %I8 : memref to memref<*xi8>
- call @print_memref_i8(%U4) : (memref<*xi8>) -> ()
+ call @printMemrefI8(%U4) : (memref<*xi8>) -> ()
memref.dealloc %U4 : memref<*xi8>
memref.dealloc %A : memref<10x3xf32, 0>
@@ -73,16 +73,16 @@
return
}
-func.func private @print_memref_i8(memref<*xi8>) attributes { llvm.emit_c_interface }
-func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefI8(memref<*xi8>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
func.func @return_two_var_memref_caller() {
%0 = memref.alloca() : memref<4x3xf32>
%c0f32 = arith.constant 1.0 : f32
linalg.fill ins(%c0f32 : f32) outs(%0 : memref<4x3xf32>)
%1:2 = call @return_two_var_memref(%0) : (memref<4x3xf32>) -> (memref<*xf32>, memref<*xf32>)
- call @print_memref_f32(%1#0) : (memref<*xf32>) -> ()
- call @print_memref_f32(%1#1) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%1#0) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%1#1) : (memref<*xf32>) -> ()
return
}
@@ -96,7 +96,7 @@
%c0f32 = arith.constant 1.0 : f32
linalg.fill ins(%c0f32 : f32) outs(%0 : memref<4x3xf32>)
%1 = call @return_var_memref(%0) : (memref<4x3xf32>) -> memref<*xf32>
- call @print_memref_f32(%1) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%1) : (memref<*xf32>) -> ()
return
}
diff --git a/mlir/test/mlir-cpu-runner/utils.mlir b/mlir/test/mlir-cpu-runner/utils.mlir
--- a/mlir/test/mlir-cpu-runner/utils.mlir
+++ b/mlir/test/mlir-cpu-runner/utils.mlir
@@ -8,7 +8,7 @@
%A = memref.alloc() : memref
memref.store %f, %A[]: memref
%U = memref.cast %A : memref to memref<*xf32>
- call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ call @printMemrefF32(%U): (memref<*xf32>) -> ()
memref.dealloc %A : memref
return
}
@@ -21,7 +21,7 @@
%B = memref.cast %A: memref<16xf32> to memref
linalg.fill ins(%f : f32) outs(%B : memref)
%U = memref.cast %B : memref to memref<*xf32>
- call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ call @printMemrefF32(%U): (memref<*xf32>) -> ()
memref.dealloc %A : memref<16xf32>
return
}
@@ -38,7 +38,7 @@
%c2 = arith.constant 2 : index
memref.store %f4, %B[%c2, %c2, %c2]: memref
%U = memref.cast %B : memref to memref<*xf32>
- call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ call @printMemrefF32(%U): (memref<*xf32>) -> ()
memref.dealloc %A : memref<3x4x5xf32>
return
}
@@ -49,7 +49,7 @@
// PRINT-3D-NEXT: 2, 2, 4, 2, 2
// PRINT-3D-NEXT: 2, 2, 2, 2, 2
-func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
!vector_type_C = type vector<4x4xf32>
!matrix_type_CC = type memref<1x1x!vector_type_C>
@@ -61,7 +61,7 @@
memref.store %vf10, %C[%c0, %c0]: !matrix_type_CC
%CC = memref.cast %C: !matrix_type_CC to memref
- call @print_memref_vector_4x4xf32(%CC): (memref) -> ()
+ call @printMemrefVector4x4xf32(%CC): (memref) -> ()
memref.dealloc %C : !matrix_type_CC
return
@@ -70,4 +70,4 @@
// PRINT-VECTOR-SPLAT-2D: Memref base@ = {{.*}} rank = 2 offset = 0 sizes = [1, 1] strides = [1, 1] data =
// PRINT-VECTOR-SPLAT-2D-NEXT: [((10, 10, 10, 10), (10, 10, 10, 10), (10, 10, 10, 10), (10, 10, 10, 10))]
-func.func private @print_memref_vector_4x4xf32(memref) attributes { llvm.emit_c_interface }
+func.func private @printMemrefVector4x4xf32(memref) attributes { llvm.emit_c_interface }
diff --git a/mlir/test/mlir-opt/async.mlir b/mlir/test/mlir-opt/async.mlir
--- a/mlir/test/mlir-opt/async.mlir
+++ b/mlir/test/mlir-opt/async.mlir
@@ -23,16 +23,16 @@
linalg.fill ins(%c0 : f32) outs(%A : memref<4xf32>)
%U = memref.cast %A : memref<4xf32> to memref<*xf32>
- call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ call @printMemrefF32(%U): (memref<*xf32>) -> ()
memref.store %c1, %A[%i0]: memref<4xf32>
call @mlirAsyncRuntimePrintCurrentThreadId(): () -> ()
- call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ call @printMemrefF32(%U): (memref<*xf32>) -> ()
%outer = async.execute {
memref.store %c2, %A[%i1]: memref<4xf32>
func.call @mlirAsyncRuntimePrintCurrentThreadId(): () -> ()
- func.call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ func.call @printMemrefF32(%U): (memref<*xf32>) -> ()
// No op async region to create a token for testing async dependency.
%noop = async.execute {
@@ -43,7 +43,7 @@
%inner = async.execute [%noop] {
memref.store %c3, %A[%i2]: memref<4xf32>
func.call @mlirAsyncRuntimePrintCurrentThreadId(): () -> ()
- func.call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ func.call @printMemrefF32(%U): (memref<*xf32>) -> ()
async.yield
}
@@ -51,14 +51,14 @@
memref.store %c4, %A[%i3]: memref<4xf32>
func.call @mlirAsyncRuntimePrintCurrentThreadId(): () -> ()
- func.call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ func.call @printMemrefF32(%U): (memref<*xf32>) -> ()
async.yield
}
async.await %outer : !async.token
call @mlirAsyncRuntimePrintCurrentThreadId(): () -> ()
- call @print_memref_f32(%U): (memref<*xf32>) -> ()
+ call @printMemrefF32(%U): (memref<*xf32>) -> ()
memref.dealloc %A : memref<4xf32>
@@ -67,4 +67,4 @@
func.func private @mlirAsyncRuntimePrintCurrentThreadId() -> ()
-func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
+func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
diff --git a/mlir/test/mlir-spirv-cpu-runner/double.mlir b/mlir/test/mlir-spirv-cpu-runner/double.mlir
--- a/mlir/test/mlir-spirv-cpu-runner/double.mlir
+++ b/mlir/test/mlir-spirv-cpu-runner/double.mlir
@@ -58,10 +58,10 @@
blocks in (%one, %one, %one) threads in (%one, %one, %one) args(%input : memref<6xi32>, %output : memref<6xi32>)
%result = memref.cast %output : memref<6xi32> to memref<*xi32>
- call @print_memref_i32(%result) : (memref<*xi32>) -> ()
+ call @printMemrefI32(%result) : (memref<*xi32>) -> ()
return
}
func.func private @fillI32Buffer(%arg0 : memref, %arg1 : i32)
- func.func private @print_memref_i32(%ptr : memref<*xi32>)
+ func.func private @printMemrefI32(%ptr : memref<*xi32>)
}
diff --git a/mlir/test/mlir-spirv-cpu-runner/simple_add.mlir b/mlir/test/mlir-spirv-cpu-runner/simple_add.mlir
--- a/mlir/test/mlir-spirv-cpu-runner/simple_add.mlir
+++ b/mlir/test/mlir-spirv-cpu-runner/simple_add.mlir
@@ -51,11 +51,11 @@
blocks in (%one, %one, %one) threads in (%one, %one, %one) args(%input1 : memref<3xf32>, %input2 : memref<3x3xf32>, %output : memref<3x3x3xf32>)
%result = memref.cast %output : memref<3x3x3xf32> to memref<*xf32>
- call @print_memref_f32(%result) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%result) : (memref<*xf32>) -> ()
return
}
func.func private @fillF32Buffer1D(%arg0 : memref, %arg1 : f32)
func.func private @fillF32Buffer2D(%arg0 : memref, %arg1 : f32)
func.func private @fillF32Buffer3D(%arg0 : memref, %arg1 : f32)
- func.func private @print_memref_f32(%arg0 : memref<*xf32>)
+ func.func private @printMemrefF32(%arg0 : memref<*xf32>)
}
diff --git a/mlir/test/mlir-vulkan-runner/addf.mlir b/mlir/test/mlir-vulkan-runner/addf.mlir
--- a/mlir/test/mlir-vulkan-runner/addf.mlir
+++ b/mlir/test/mlir-vulkan-runner/addf.mlir
@@ -41,10 +41,10 @@
blocks in (%cst8, %cst1, %cst1) threads in (%cst1, %cst1, %cst1) args(%arg0 : memref<8xf32>, %arg1 : memref<8xf32>, %arg2 : memref<8xf32>)
%arg6 = memref.cast %arg5 : memref to memref<*xf32>
- call @print_memref_f32(%arg6) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%arg6) : (memref<*xf32>) -> ()
return
}
func.func private @fillResource1DFloat(%0 : memref, %1 : f32)
- func.func private @print_memref_f32(%ptr : memref<*xf32>)
+ func.func private @printMemrefF32(%ptr : memref<*xf32>)
}
diff --git a/mlir/test/mlir-vulkan-runner/addi.mlir b/mlir/test/mlir-vulkan-runner/addi.mlir
--- a/mlir/test/mlir-vulkan-runner/addi.mlir
+++ b/mlir/test/mlir-vulkan-runner/addi.mlir
@@ -40,12 +40,12 @@
blocks in (%cst8, %cst8, %cst8) threads in (%cst1, %cst1, %cst1) args(%arg0 : memref<8xi32>, %arg1 : memref<8x8xi32>, %arg2 : memref<8x8x8xi32>)
%arg6 = memref.cast %arg5 : memref to memref<*xi32>
- call @print_memref_i32(%arg6) : (memref<*xi32>) -> ()
+ call @printMemrefI32(%arg6) : (memref<*xi32>) -> ()
return
}
func.func private @fillResource1DInt(%0 : memref, %1 : i32)
func.func private @fillResource2DInt(%0 : memref, %1 : i32)
func.func private @fillResource3DInt(%0 : memref, %1 : i32)
- func.func private @print_memref_i32(%ptr : memref<*xi32>)
+ func.func private @printMemrefI32(%ptr : memref<*xi32>)
}
diff --git a/mlir/test/mlir-vulkan-runner/addi8.mlir b/mlir/test/mlir-vulkan-runner/addi8.mlir
--- a/mlir/test/mlir-vulkan-runner/addi8.mlir
+++ b/mlir/test/mlir-vulkan-runner/addi8.mlir
@@ -41,11 +41,11 @@
blocks in (%cst8, %cst8, %cst8) threads in (%cst1, %cst1, %cst1) args(%arg0 : memref<8xi8>, %arg1 : memref<8x8xi8>, %arg2 : memref<8x8x8xi32>)
%arg6 = memref.cast %arg5 : memref to memref<*xi32>
- call @print_memref_i32(%arg6) : (memref<*xi32>) -> ()
+ call @printMemrefI32(%arg6) : (memref<*xi32>) -> ()
return
}
func.func private @fillResource1DInt8(%0 : memref, %1 : i8)
func.func private @fillResource2DInt8(%0 : memref, %1 : i8)
func.func private @fillResource3DInt(%0 : memref, %1 : i32)
- func.func private @print_memref_i32(%ptr : memref<*xi32>)
+ func.func private @printMemrefI32(%ptr : memref<*xi32>)
}
diff --git a/mlir/test/mlir-vulkan-runner/mulf.mlir b/mlir/test/mlir-vulkan-runner/mulf.mlir
--- a/mlir/test/mlir-vulkan-runner/mulf.mlir
+++ b/mlir/test/mlir-vulkan-runner/mulf.mlir
@@ -42,10 +42,10 @@
blocks in (%cst4, %cst4, %cst1) threads in(%cst1, %cst1, %cst1) args(%arg0 : memref<4x4xf32>, %arg1 : memref<4x4xf32>, %arg2 : memref<4x4xf32>)
%arg6 = memref.cast %arg5 : memref to memref<*xf32>
- call @print_memref_f32(%arg6) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%arg6) : (memref<*xf32>) -> ()
return
}
func.func private @fillResource2DFloat(%0 : memref, %1 : f32)
- func.func private @print_memref_f32(%ptr : memref<*xf32>)
+ func.func private @printMemrefF32(%ptr : memref<*xf32>)
}
diff --git a/mlir/test/mlir-vulkan-runner/subf.mlir b/mlir/test/mlir-vulkan-runner/subf.mlir
--- a/mlir/test/mlir-vulkan-runner/subf.mlir
+++ b/mlir/test/mlir-vulkan-runner/subf.mlir
@@ -44,10 +44,10 @@
blocks in (%cst8, %cst4, %cst4) threads in (%cst1, %cst1, %cst1) args(%arg0 : memref<8x4x4xf32>, %arg1 : memref<4x4xf32>, %arg2 : memref<8x4x4xf32>)
%arg6 = memref.cast %arg5 : memref to memref<*xf32>
- call @print_memref_f32(%arg6) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%arg6) : (memref<*xf32>) -> ()
return
}
func.func private @fillResource2DFloat(%0 : memref, %1 : f32)
func.func private @fillResource3DFloat(%0 : memref, %1 : f32)
- func.func private @print_memref_f32(%ptr : memref<*xf32>)
+ func.func private @printMemrefF32(%ptr : memref<*xf32>)
}
diff --git a/mlir/test/mlir-vulkan-runner/time.mlir b/mlir/test/mlir-vulkan-runner/time.mlir
--- a/mlir/test/mlir-vulkan-runner/time.mlir
+++ b/mlir/test/mlir-vulkan-runner/time.mlir
@@ -51,6 +51,6 @@
return
}
func.func private @fillResource1DFloat(%0 : memref, %1 : f32)
- func.func private @print_memref_f32(%ptr : memref<*xf32>)
+ func.func private @printMemrefF32(%ptr : memref<*xf32>)
}
diff --git a/mlir/test/python/execution_engine.py b/mlir/test/python/execution_engine.py
--- a/mlir/test/python/execution_engine.py
+++ b/mlir/test/python/execution_engine.py
@@ -335,10 +335,10 @@
%cst42 = arith.constant 42.0 : f32
memref.store %cst42, %arg0[%c0] : memref<1xf32>
%u_memref = memref.cast %arg0 : memref<1xf32> to memref<*xf32>
- call @print_memref_f32(%u_memref) : (memref<*xf32>) -> ()
+ call @printMemrefF32(%u_memref) : (memref<*xf32>) -> ()
return
}
- func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
+ func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
}
""")
arg0 = np.array([0.0]).astype(np.float32)
@@ -367,16 +367,16 @@
module = Module.parse("""
module {
func.func @main() attributes { llvm.emit_c_interface } {
- %now = call @nano_time() : () -> i64
+ %now = call @nanoTime() : () -> i64
%memref = memref.alloca() : memref<1xi64>
%c0 = arith.constant 0 : index
memref.store %now, %memref[%c0] : memref<1xi64>
%u_memref = memref.cast %memref : memref<1xi64> to memref<*xi64>
- call @print_memref_i64(%u_memref) : (memref<*xi64>) -> ()
+ call @printMemrefI64(%u_memref) : (memref<*xi64>) -> ()
return
}
- func.func private @nano_time() -> i64 attributes { llvm.emit_c_interface }
- func.func private @print_memref_i64(memref<*xi64>) attributes { llvm.emit_c_interface }
+ func.func private @nanoTime() -> i64 attributes { llvm.emit_c_interface }
+ func.func private @printMemrefI64(memref<*xi64>) attributes { llvm.emit_c_interface }
}""")
execution_engine = ExecutionEngine(