diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -293,8 +293,9 @@
   target.addLegalDialect<::mlir::NVVM::NVVMDialect>();
   target.addIllegalDialect<gpu::GPUDialect>();
   target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FAbsOp,
-                      LLVM::FCeilOp, LLVM::FFloorOp, LLVM::LogOp, LLVM::Log10Op,
-                      LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp, LLVM::SqrtOp>();
+                      LLVM::FCeilOp, LLVM::FFloorOp, LLVM::FRemOp, LLVM::LogOp,
+                      LLVM::Log10Op, LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp,
+                      LLVM::SqrtOp>();
 
   // TODO: Remove once we support replacing non-root ops.
   target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
@@ -357,6 +358,8 @@
                                      "__nv_expm1");
   populateOpPatterns<math::FloorOp>(converter, patterns, "__nv_floorf",
                                     "__nv_floor");
+  populateOpPatterns<arith::RemFOp>(converter, patterns, "__nv_fmodf",
+                                    "__nv_fmod");
   populateOpPatterns<math::LogOp>(converter, patterns, "__nv_logf", "__nv_log");
   populateOpPatterns<math::Log1pOp>(converter, patterns, "__nv_log1pf",
                                     "__nv_log1p");
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -616,6 +616,19 @@
   }
 }
 
+gpu.module @test_module_31 {
+  // CHECK: llvm.func @__nv_fmodf(f32, f32) -> f32
+  // CHECK: llvm.func @__nv_fmod(f64, f64) -> f64
+  // CHECK-LABEL: func @gpu_fmod
+  func.func @gpu_fmod(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+    %result32 = arith.remf %arg_f32, %arg_f32 : f32
+    // CHECK: llvm.call @__nv_fmodf(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
+    %result64 = arith.remf %arg_f64, %arg_f64 : f64
+    // CHECK: llvm.call @__nv_fmod(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
+    func.return %result32, %result64 : f32, f64
+  }
+}
+
 transform.sequence failures(propagate) {
 ^bb1(%toplevel_module: !transform.any_op):
   %gpu_module = transform.structured.match ops{["gpu.module"]} in %toplevel_module
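
For reference, a minimal standalone way to exercise the new lowering (a sketch, not part of the patch; the file name remf.mlir and the module/function names are made up for illustration):

  // remf.mlir -- hypothetical reproducer for the arith.remf lowering.
  gpu.module @example {
    func.func @rem(%x : f32, %y : f32) -> f32 {
      // After conversion this op should become:
      //   llvm.call @__nv_fmodf(%x, %y) : (f32, f32) -> f32
      %r = arith.remf %x, %y : f32
      func.return %r : f32
    }
  }

Running mlir-opt remf.mlir -convert-gpu-to-nvvm (the same pass the test's RUN line drives) should now rewrite the arith.remf into a call to the libdevice __nv_fmodf declaration, matching the CHECK lines added in the test above; f64 operands go to __nv_fmod instead.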