diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
@@ -245,6 +245,9 @@
   Table[RTLIB::FMAX_F32] = f32_func_f32_f32;
   Table[RTLIB::FMAX_F64] = f64_func_f64_f64;
   Table[RTLIB::FMAX_F128] = i64_i64_func_i64_i64_i64_i64;
+  Table[RTLIB::LDEXP_F32] = f32_func_f32_i32;
+  Table[RTLIB::LDEXP_F64] = f64_func_f64_i32;
+  Table[RTLIB::LDEXP_F128] = i64_i64_func_i64_i64_i32;
 
   // Conversion
   // All F80 and PPCF128 routines are unsupported.
diff --git a/llvm/test/CodeGen/WebAssembly/libcalls.ll b/llvm/test/CodeGen/WebAssembly/libcalls.ll
--- a/llvm/test/CodeGen/WebAssembly/libcalls.ll
+++ b/llvm/test/CodeGen/WebAssembly/libcalls.ll
@@ -19,6 +19,7 @@
 declare double @llvm.log.f64(double)
 declare double @llvm.exp.f64(double)
 declare double @llvm.roundeven.f64(double)
+declare double @llvm.ldexp.f64.i32(double, i32)
 declare i32 @llvm.lround(double)
 
 define fp128 @fp128libcalls(fp128 %x, fp128 %y, i32 %z) {
@@ -231,23 +232,25 @@
   ret i128 %c
 }
 
-define i32 @f64libcalls(double %x, double %y, i32 %z) {
+define double @f64libcalls(double %x, double %y, i32 %z) {
 ; CHECK-LABEL: f64libcalls:
-; CHECK:         .functype f64libcalls (f64, f64, i32) -> (i32)
+; CHECK:         .functype f64libcalls (f64, f64, i32) -> (f64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    local.get $push9=, 0
-; CHECK-NEXT:    call $push0=, cos, $pop9
+; CHECK-NEXT:    local.get $push13=, 0
+; CHECK-NEXT:    local.get $push10=, 0
+; CHECK-NEXT:    call $push0=, cos, $pop10
 ; CHECK-NEXT:    call $push1=, log10, $pop0
-; CHECK-NEXT:    local.get $push10=, 1
-; CHECK-NEXT:    call $push2=, pow, $pop1, $pop10
-; CHECK-NEXT:    local.get $push11=, 2
-; CHECK-NEXT:    call $push3=, __powidf2, $pop2, $pop11
+; CHECK-NEXT:    local.get $push11=, 1
+; CHECK-NEXT:    call $push2=, pow, $pop1, $pop11
+; CHECK-NEXT:    local.get $push12=, 2
+; CHECK-NEXT:    call $push3=, __powidf2, $pop2, $pop12
 ; CHECK-NEXT:    call $push4=, log, $pop3
 ; CHECK-NEXT:    call $push5=, exp, $pop4
 ; CHECK-NEXT:    call $push6=, cbrt, $pop5
 ; CHECK-NEXT:    call $push7=, roundeven, $pop6
 ; CHECK-NEXT:    call $push8=, lround, $pop7
-; CHECK-NEXT:    return $pop8
+; CHECK-NEXT:    call $push9=, ldexp, $pop13, $pop8
+; CHECK-NEXT:    return $pop9
   %a = call double @llvm.cos.f64(double %x)
   %b = call double @llvm.log10.f64(double %a)
   %c = call double @llvm.pow.f64(double %b, double %y)
@@ -257,7 +260,8 @@
   %g = call fast double @llvm.pow.f64(double %f, double 0x3FD5555555555555)
   %h = call double @llvm.roundeven.f64(double %g)
   %i = call i32 @llvm.lround(double %h)
-  ret i32 %i
+  %j = call double @llvm.ldexp.f64.i32(double %x, i32 %i);
+  ret double %j
 }
 
 ; fcmp ord and unord (RTLIB::O_F32 / RTLIB::UO_F32 etc) are a special case (see