Index: clang/include/clang/Basic/Builtins.def
===================================================================
--- clang/include/clang/Basic/Builtins.def
+++ clang/include/clang/Basic/Builtins.def
@@ -157,6 +157,7 @@
 BUILTIN(__builtin_ldexpf, "ffi" , "Fne")
 BUILTIN(__builtin_ldexpl, "LdLdi", "Fne")
 BUILTIN(__builtin_ldexpf128, "LLdLLdi", "Fne")
+BUILTIN(__builtin_ldexpf16, "hhi", "Fne")
 BUILTIN(__builtin_modf , "ddd*" , "Fn")
 BUILTIN(__builtin_modff, "fff*" , "Fn")
 BUILTIN(__builtin_modfl, "LdLdLd*", "Fn")
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -524,6 +524,25 @@
   }
 }
 
+// Emit an intrinsic mangled on its result type and its second argument type.
+static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
+    CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
+    llvm::Intrinsic::ID ConstrainedIntrinsicID) {
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+  if (CGF.Builder.getIsFPConstrained()) {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
+                                       {Src0->getType(), Src1->getType()});
+    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
+  }
+
+  Function *F =
+      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
+  return CGF.Builder.CreateCall(F, {Src0, Src1});
+}
+
 // Emit an intrinsic that has 3 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
 static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
@@ -2551,7 +2570,15 @@
     return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
         *this, E, Intrinsic::llrint,
         Intrinsic::experimental_constrained_llrint));
-
+  case Builtin::BI__builtin_ldexp:
+  case Builtin::BI__builtin_ldexpf:
+  case Builtin::BI__builtin_ldexpl:
+  case Builtin::BI__builtin_ldexpf16:
+  case Builtin::BI__builtin_ldexpf128: {
+    return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
+        *this, E, Intrinsic::ldexp,
+        Intrinsic::experimental_constrained_ldexp));
+  }
   default:
     break;
   }
@@ -3021,6 +3048,8 @@
     llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
 
     if (Builder.getIsFPConstrained()) {
+      // FIXME: llvm.powi is mangled on two types, but
+      // llvm.experimental.constrained.powi is mangled on only one.
       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
       Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
                                      Src0->getType());
Index: clang/test/CodeGen/constrained-math-builtins.c
===================================================================
--- clang/test/CodeGen/constrained-math-builtins.c
+++ clang/test/CodeGen/constrained-math-builtins.c
@@ -7,7 +7,7 @@
 
 #pragma float_control(except, on)
 
-void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
+void foo(double *d, float f, float *fp, long double *l, int *i, const char *c, _Float16 h) {
   f = __builtin_fmod(f,f);    f = __builtin_fmodf(f,f);   f = __builtin_fmodl(f,f);  f = __builtin_fmodf128(f,f);
 
 // CHECK: call double @llvm.experimental.constrained.frem.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
@@ -28,6 +28,14 @@
 
 // CHECK: call float @llvm.experimental.constrained.powi.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
 // CHECK: call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  h = __builtin_ldexpf16(h, *i);  *d = __builtin_ldexp(*d, *i);  f = __builtin_ldexpf(f, *i);  __builtin_ldexpl(*l, *i);
+
+// CHECK: call half @llvm.experimental.constrained.ldexp.f16.i32(half %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call double @llvm.experimental.constrained.ldexp.f64.i32(double %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.ldexp.f32.i32(float %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.ldexp.f80.i32(x86_fp80 %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
   __builtin_ceil(f);  __builtin_ceilf(f);  __builtin_ceill(f);  __builtin_ceilf128(f);
 
 // CHECK: call double @llvm.experimental.constrained.ceil.f64(double %{{.*}}, metadata !"fpexcept.strict")
@@ -190,6 +198,11 @@
 // CHECK: declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata)
 
+// CHECK: declare half @llvm.experimental.constrained.ldexp.f16.i32(half, i32, metadata, metadata)
+// CHECK: declare double @llvm.experimental.constrained.ldexp.f64.i32(double, i32, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.ldexp.f32.i32(float, i32, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.ldexp.f80.i32(x86_fp80, i32, metadata, metadata)
+
 // CHECK: declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
 // CHECK: declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata)
Index: clang/test/CodeGen/math-builtins.c
===================================================================
--- clang/test/CodeGen/math-builtins.c
+++ clang/test/CodeGen/math-builtins.c
@@ -77,10 +77,10 @@
 
   __builtin_ldexp(f,f);  __builtin_ldexpf(f,f);  __builtin_ldexpl(f,f);  __builtin_ldexpf128(f,f);
 
-// NO__ERRNO: declare double @ldexp(double noundef, i32 noundef) [[READNONE]]
-// NO__ERRNO: declare float @ldexpf(float noundef, i32 noundef) [[READNONE]]
-// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[READNONE]]
-// NO__ERRNO: declare fp128 @ldexpf128(fp128 noundef, i32 noundef) [[READNONE]]
+// NO__ERRNO: declare double @llvm.ldexp.f64.i32(double, i32) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.ldexp.f32.i32(float, i32) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.ldexp.f80.i32(x86_fp80, i32) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare fp128 @llvm.ldexp.f128.i32(fp128, i32) [[READNONE_INTRINSIC]]
 // HAS_ERRNO: declare double @ldexp(double noundef, i32 noundef) [[NOT_READNONE]]
 // HAS_ERRNO: declare float @ldexpf(float noundef, i32 noundef) [[NOT_READNONE]]
 // HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[NOT_READNONE]]
Index: clang/test/CodeGenOpenCL/builtins-f16.cl
===================================================================
--- clang/test/CodeGenOpenCL/builtins-f16.cl
+++ clang/test/CodeGenOpenCL/builtins-f16.cl
@@ -3,7 +3,7 @@
 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
 
 // CHECK-LABEL: define{{.*}} void @test_half_builtins
-void test_half_builtins(half h0, half h1, half h2) {
+void test_half_builtins(half h0, half h1, half h2, int i0) {
   volatile half res;
 
   // CHECK: call half @llvm.copysign.f16(half %h0, half %h1)
@@ -68,4 +68,7 @@
 
   // CHECK: call half @llvm.canonicalize.f16(half %h0)
   res = __builtin_canonicalizef16(h0);
+
+  // CHECK: call half @llvm.ldexp.f16.i32(half %h0, i32 %i0)
+  res = __builtin_ldexpf16(h0, i0);
 }
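
Note (illustration, not part of the patch): with the new builtin available, callers can scale a _Float16 by a power of two directly. This is a minimal sketch assuming a target with _Float16 support; the function name scale_by_exp2 is hypothetical. By default the call lowers to llvm.ldexp.f16.i32, and under #pragma float_control(except, on) to llvm.experimental.constrained.ldexp.f16.i32, matching the FileCheck lines added above.

  // Hypothetical usage sketch, not part of the patch.
  _Float16 scale_by_exp2(_Float16 x, int e) {
    // Scales x by 2**e; clang emits @llvm.ldexp.f16.i32
    // (or the constrained variant in strict FP mode).
    return __builtin_ldexpf16(x, e);
  }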