diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -183,6 +183,8 @@
                                                   "__nv_log10");
   patterns.insert<OpToFuncCallLowering<Log2Op>>(converter, "__nv_log2f",
                                                 "__nv_log2");
+  patterns.insert<OpToFuncCallLowering<RsqrtOp>>(converter, "__nv_rsqrtf",
+                                                 "__nv_rsqrt");
   patterns.insert<OpToFuncCallLowering<SinOp>>(converter, "__nv_sinf",
                                                "__nv_sin");
   patterns.insert<OpToFuncCallLowering<TanhOp>>(converter, "__nv_tanhf",
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -295,6 +295,26 @@
 
 // -----
 
+gpu.module @test_module {
+  // CHECK: llvm.func @__nv_rsqrtf(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__nv_rsqrt(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_rsqrt
+  func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
+      -> (f16, f32, f64) {
+    %result16 = std.rsqrt %arg_f16 : f16
+    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
+    // CHECK-NEXT: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    %result32 = std.rsqrt %arg_f32 : f32
+    // CHECK: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.rsqrt %arg_f64 : f64
+    // CHECK: llvm.call @__nv_rsqrt(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return %result16, %result32, %result64 : f16, f32, f64
+  }
+}
+
+// -----
+
 // Test that we handled properly operation with SymbolTable other than module op
 gpu.module @test_module {
   "test.symbol_scope"() ({