diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.def b/llvm/include/llvm/IR/RuntimeLibcalls.def
--- a/llvm/include/llvm/IR/RuntimeLibcalls.def
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.def
@@ -110,12 +110,12 @@
 HANDLE_LIBCALL(REM_F32, "fmodf")
 HANDLE_LIBCALL(REM_F64, "fmod")
 HANDLE_LIBCALL(REM_F80, "fmodl")
-HANDLE_LIBCALL(REM_F128, "fmodl")
+HANDLE_LIBCALL(REM_F128, "fmodf128")
 HANDLE_LIBCALL(REM_PPCF128, "fmodl")
 HANDLE_LIBCALL(FMA_F32, "fmaf")
 HANDLE_LIBCALL(FMA_F64, "fma")
 HANDLE_LIBCALL(FMA_F80, "fmal")
-HANDLE_LIBCALL(FMA_F128, "fmal")
+HANDLE_LIBCALL(FMA_F128, "fmaf128")
 HANDLE_LIBCALL(FMA_PPCF128, "fmal")
 HANDLE_LIBCALL(POWI_F32, "__powisf2")
 HANDLE_LIBCALL(POWI_F64, "__powidf2")
@@ -125,72 +125,72 @@
 HANDLE_LIBCALL(SQRT_F32, "sqrtf")
 HANDLE_LIBCALL(SQRT_F64, "sqrt")
 HANDLE_LIBCALL(SQRT_F80, "sqrtl")
-HANDLE_LIBCALL(SQRT_F128, "sqrtl")
+HANDLE_LIBCALL(SQRT_F128, "sqrtf128")
 HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
 HANDLE_LIBCALL(CBRT_F32, "cbrtf")
 HANDLE_LIBCALL(CBRT_F64, "cbrt")
 HANDLE_LIBCALL(CBRT_F80, "cbrtl")
-HANDLE_LIBCALL(CBRT_F128, "cbrtl")
+HANDLE_LIBCALL(CBRT_F128, "cbrtf128")
 HANDLE_LIBCALL(CBRT_PPCF128, "cbrtl")
 HANDLE_LIBCALL(LOG_F32, "logf")
 HANDLE_LIBCALL(LOG_F64, "log")
 HANDLE_LIBCALL(LOG_F80, "logl")
-HANDLE_LIBCALL(LOG_F128, "logl")
+HANDLE_LIBCALL(LOG_F128, "logf128")
 HANDLE_LIBCALL(LOG_PPCF128, "logl")
 HANDLE_LIBCALL(LOG_FINITE_F32, "__logf_finite")
 HANDLE_LIBCALL(LOG_FINITE_F64, "__log_finite")
 HANDLE_LIBCALL(LOG_FINITE_F80, "__logl_finite")
-HANDLE_LIBCALL(LOG_FINITE_F128, "__logl_finite")
+HANDLE_LIBCALL(LOG_FINITE_F128, "__logf128_finite")
 HANDLE_LIBCALL(LOG_FINITE_PPCF128, "__logl_finite")
 HANDLE_LIBCALL(LOG2_F32, "log2f")
 HANDLE_LIBCALL(LOG2_F64, "log2")
 HANDLE_LIBCALL(LOG2_F80, "log2l")
-HANDLE_LIBCALL(LOG2_F128, "log2l")
+HANDLE_LIBCALL(LOG2_F128, "log2f128")
 HANDLE_LIBCALL(LOG2_PPCF128, "log2l")
 HANDLE_LIBCALL(LOG2_FINITE_F32, "__log2f_finite")
 HANDLE_LIBCALL(LOG2_FINITE_F64, "__log2_finite")
 HANDLE_LIBCALL(LOG2_FINITE_F80, "__log2l_finite")
-HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2l_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2f128_finite")
 HANDLE_LIBCALL(LOG2_FINITE_PPCF128, "__log2l_finite")
 HANDLE_LIBCALL(LOG10_F32, "log10f")
 HANDLE_LIBCALL(LOG10_F64, "log10")
 HANDLE_LIBCALL(LOG10_F80, "log10l")
-HANDLE_LIBCALL(LOG10_F128, "log10l")
+HANDLE_LIBCALL(LOG10_F128, "log10f128")
 HANDLE_LIBCALL(LOG10_PPCF128, "log10l")
 HANDLE_LIBCALL(LOG10_FINITE_F32, "__log10f_finite")
 HANDLE_LIBCALL(LOG10_FINITE_F64, "__log10_finite")
 HANDLE_LIBCALL(LOG10_FINITE_F80, "__log10l_finite")
-HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10l_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10f128_finite")
 HANDLE_LIBCALL(LOG10_FINITE_PPCF128, "__log10l_finite")
 HANDLE_LIBCALL(EXP_F32, "expf")
 HANDLE_LIBCALL(EXP_F64, "exp")
 HANDLE_LIBCALL(EXP_F80, "expl")
-HANDLE_LIBCALL(EXP_F128, "expl")
+HANDLE_LIBCALL(EXP_F128, "expf128")
 HANDLE_LIBCALL(EXP_PPCF128, "expl")
 HANDLE_LIBCALL(EXP_FINITE_F32, "__expf_finite")
 HANDLE_LIBCALL(EXP_FINITE_F64, "__exp_finite")
 HANDLE_LIBCALL(EXP_FINITE_F80, "__expl_finite")
-HANDLE_LIBCALL(EXP_FINITE_F128, "__expl_finite")
+HANDLE_LIBCALL(EXP_FINITE_F128, "__expf128_finite")
 HANDLE_LIBCALL(EXP_FINITE_PPCF128, "__expl_finite")
 HANDLE_LIBCALL(EXP2_F32, "exp2f")
 HANDLE_LIBCALL(EXP2_F64, "exp2")
 HANDLE_LIBCALL(EXP2_F80, "exp2l")
-HANDLE_LIBCALL(EXP2_F128, "exp2l")
+HANDLE_LIBCALL(EXP2_F128, "exp2f128")
 HANDLE_LIBCALL(EXP2_PPCF128, "exp2l")
 HANDLE_LIBCALL(EXP2_FINITE_F32, "__exp2f_finite")
 HANDLE_LIBCALL(EXP2_FINITE_F64, "__exp2_finite")
 HANDLE_LIBCALL(EXP2_FINITE_F80, "__exp2l_finite")
-HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2l_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2f128_finite")
 HANDLE_LIBCALL(EXP2_FINITE_PPCF128, "__exp2l_finite")
 HANDLE_LIBCALL(SIN_F32, "sinf")
 HANDLE_LIBCALL(SIN_F64, "sin")
 HANDLE_LIBCALL(SIN_F80, "sinl")
-HANDLE_LIBCALL(SIN_F128, "sinl")
+HANDLE_LIBCALL(SIN_F128, "sinf128")
 HANDLE_LIBCALL(SIN_PPCF128, "sinl")
 HANDLE_LIBCALL(COS_F32, "cosf")
 HANDLE_LIBCALL(COS_F64, "cos")
 HANDLE_LIBCALL(COS_F80, "cosl")
-HANDLE_LIBCALL(COS_F128, "cosl")
+HANDLE_LIBCALL(COS_F128, "cosf128")
 HANDLE_LIBCALL(COS_PPCF128, "cosl")
 HANDLE_LIBCALL(SINCOS_F32, nullptr)
 HANDLE_LIBCALL(SINCOS_F64, nullptr)
@@ -202,92 +202,92 @@
 HANDLE_LIBCALL(POW_F32, "powf")
 HANDLE_LIBCALL(POW_F64, "pow")
 HANDLE_LIBCALL(POW_F80, "powl")
-HANDLE_LIBCALL(POW_F128, "powl")
+HANDLE_LIBCALL(POW_F128, "powf128")
 HANDLE_LIBCALL(POW_PPCF128, "powl")
 HANDLE_LIBCALL(POW_FINITE_F32, "__powf_finite")
 HANDLE_LIBCALL(POW_FINITE_F64, "__pow_finite")
 HANDLE_LIBCALL(POW_FINITE_F80, "__powl_finite")
-HANDLE_LIBCALL(POW_FINITE_F128, "__powl_finite")
+HANDLE_LIBCALL(POW_FINITE_F128, "__powf128_finite")
 HANDLE_LIBCALL(POW_FINITE_PPCF128, "__powl_finite")
 HANDLE_LIBCALL(CEIL_F32, "ceilf")
 HANDLE_LIBCALL(CEIL_F64, "ceil")
 HANDLE_LIBCALL(CEIL_F80, "ceill")
-HANDLE_LIBCALL(CEIL_F128, "ceill")
+HANDLE_LIBCALL(CEIL_F128, "ceilf128")
 HANDLE_LIBCALL(CEIL_PPCF128, "ceill")
 HANDLE_LIBCALL(TRUNC_F32, "truncf")
 HANDLE_LIBCALL(TRUNC_F64, "trunc")
 HANDLE_LIBCALL(TRUNC_F80, "truncl")
-HANDLE_LIBCALL(TRUNC_F128, "truncl")
+HANDLE_LIBCALL(TRUNC_F128, "truncf128")
 HANDLE_LIBCALL(TRUNC_PPCF128, "truncl")
 HANDLE_LIBCALL(RINT_F32, "rintf")
 HANDLE_LIBCALL(RINT_F64, "rint")
 HANDLE_LIBCALL(RINT_F80, "rintl")
-HANDLE_LIBCALL(RINT_F128, "rintl")
+HANDLE_LIBCALL(RINT_F128, "rintf128")
 HANDLE_LIBCALL(RINT_PPCF128, "rintl")
 HANDLE_LIBCALL(NEARBYINT_F32, "nearbyintf")
 HANDLE_LIBCALL(NEARBYINT_F64, "nearbyint")
 HANDLE_LIBCALL(NEARBYINT_F80, "nearbyintl")
-HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintl")
+HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintf128")
 HANDLE_LIBCALL(NEARBYINT_PPCF128, "nearbyintl")
 HANDLE_LIBCALL(ROUND_F32, "roundf")
 HANDLE_LIBCALL(ROUND_F64, "round")
 HANDLE_LIBCALL(ROUND_F80, "roundl")
-HANDLE_LIBCALL(ROUND_F128, "roundl")
+HANDLE_LIBCALL(ROUND_F128, "roundf128")
 HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
 HANDLE_LIBCALL(ROUNDEVEN_F32, "roundevenf")
 HANDLE_LIBCALL(ROUNDEVEN_F64, "roundeven")
 HANDLE_LIBCALL(ROUNDEVEN_F80, "roundevenl")
-HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenl")
+HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenf128")
 HANDLE_LIBCALL(ROUNDEVEN_PPCF128, "roundevenl")
 HANDLE_LIBCALL(FLOOR_F32, "floorf")
 HANDLE_LIBCALL(FLOOR_F64, "floor")
 HANDLE_LIBCALL(FLOOR_F80, "floorl")
-HANDLE_LIBCALL(FLOOR_F128, "floorl")
+HANDLE_LIBCALL(FLOOR_F128, "floorf128")
 HANDLE_LIBCALL(FLOOR_PPCF128, "floorl")
 HANDLE_LIBCALL(COPYSIGN_F32, "copysignf")
 HANDLE_LIBCALL(COPYSIGN_F64, "copysign")
 HANDLE_LIBCALL(COPYSIGN_F80, "copysignl")
-HANDLE_LIBCALL(COPYSIGN_F128, "copysignl")
+HANDLE_LIBCALL(COPYSIGN_F128, "copysignf128")
 HANDLE_LIBCALL(COPYSIGN_PPCF128, "copysignl")
 HANDLE_LIBCALL(FMIN_F32, "fminf")
 HANDLE_LIBCALL(FMIN_F64, "fmin")
 HANDLE_LIBCALL(FMIN_F80, "fminl")
-HANDLE_LIBCALL(FMIN_F128, "fminl")
+HANDLE_LIBCALL(FMIN_F128, "fminf128")
 HANDLE_LIBCALL(FMIN_PPCF128, "fminl")
 HANDLE_LIBCALL(FMAX_F32, "fmaxf")
 HANDLE_LIBCALL(FMAX_F64, "fmax")
 HANDLE_LIBCALL(FMAX_F80, "fmaxl")
-HANDLE_LIBCALL(FMAX_F128, "fmaxl")
+HANDLE_LIBCALL(FMAX_F128, "fmaxf128")
 HANDLE_LIBCALL(FMAX_PPCF128, "fmaxl")
 HANDLE_LIBCALL(LROUND_F32, "lroundf")
 HANDLE_LIBCALL(LROUND_F64, "lround")
 HANDLE_LIBCALL(LROUND_F80, "lroundl")
-HANDLE_LIBCALL(LROUND_F128, "lroundl")
+HANDLE_LIBCALL(LROUND_F128, "lroundf128")
 HANDLE_LIBCALL(LROUND_PPCF128, "lroundl")
 HANDLE_LIBCALL(LLROUND_F32, "llroundf")
 HANDLE_LIBCALL(LLROUND_F64, "llround")
 HANDLE_LIBCALL(LLROUND_F80, "llroundl")
-HANDLE_LIBCALL(LLROUND_F128, "llroundl")
+HANDLE_LIBCALL(LLROUND_F128, "llroundf128")
 HANDLE_LIBCALL(LLROUND_PPCF128, "llroundl")
 HANDLE_LIBCALL(LRINT_F32, "lrintf")
 HANDLE_LIBCALL(LRINT_F64, "lrint")
 HANDLE_LIBCALL(LRINT_F80, "lrintl")
-HANDLE_LIBCALL(LRINT_F128, "lrintl")
+HANDLE_LIBCALL(LRINT_F128, "lrintf128")
 HANDLE_LIBCALL(LRINT_PPCF128, "lrintl")
 HANDLE_LIBCALL(LLRINT_F32, "llrintf")
 HANDLE_LIBCALL(LLRINT_F64, "llrint")
 HANDLE_LIBCALL(LLRINT_F80, "llrintl")
-HANDLE_LIBCALL(LLRINT_F128, "llrintl")
+HANDLE_LIBCALL(LLRINT_F128, "llrintf128")
 HANDLE_LIBCALL(LLRINT_PPCF128, "llrintl")
 HANDLE_LIBCALL(LDEXP_F32, "ldexpf")
 HANDLE_LIBCALL(LDEXP_F64, "ldexp")
 HANDLE_LIBCALL(LDEXP_F80, "ldexpl")
-HANDLE_LIBCALL(LDEXP_F128, "ldexpl")
+HANDLE_LIBCALL(LDEXP_F128, "ldexpf128")
 HANDLE_LIBCALL(LDEXP_PPCF128, "ldexpl")
 HANDLE_LIBCALL(FREXP_F32, "frexpf")
 HANDLE_LIBCALL(FREXP_F64, "frexp")
 HANDLE_LIBCALL(FREXP_F80, "frexpl")
-HANDLE_LIBCALL(FREXP_F128, "frexpl")
+HANDLE_LIBCALL(FREXP_F128, "frexpf128")
 HANDLE_LIBCALL(FREXP_PPCF128, "frexpl")

 // Floating point environment
diff --git a/llvm/test/CodeGen/X86/f128-arith.ll b/llvm/test/CodeGen/X86/f128-arith.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/f128-arith.ll
@@ -0,0 +1,515 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-64
+;
+; Test lowering of fp128 intrinsics
+
+define fp128 @test_cbrtf128(fp128 %a) {
+; CHECK-32-LABEL: test_cbrtf128:
+; CHECK-32: calll llvm.cbrt.f128@PLT
+;
+; CHECK-64-LABEL: test_cbrtf128:
+; CHECK-64: jmp llvm.cbrt.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.cbrt.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.cbrt.f128(fp128)
+
+
+define fp128 @test_ceilf128(fp128 %a) {
+; CHECK-32-LABEL: test_ceilf128:
+; CHECK-32: calll ceilf128
+;
+; CHECK-64-LABEL: test_ceilf128:
+; CHECK-64: jmp ceilf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.ceil.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.ceil.f128(fp128)
+
+
+define fp128 @test_copysignf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_copysignf128:
+; CHECK-32: # %bb.0: # %start
+; CHECK-32-NEXT: pushl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: pushl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: .cfi_offset %esi, -16
+; CHECK-32-NEXT: .cfi_offset %edi, -12
+; CHECK-32-NEXT: .cfi_offset %ebx, -8
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT: movl $-2147483648, %edi # imm = 0x80000000
+; CHECK-32-NEXT: andl {{[0-9]+}}(%esp), %edi
+; CHECK-32-NEXT: movl $2147483647, %ebx # imm = 0x7FFFFFFF
+; CHECK-32-NEXT: andl {{[0-9]+}}(%esp), %ebx
+; CHECK-32-NEXT: orl %edi, %ebx
+; CHECK-32-NEXT: movl %ebx, 12(%eax)
+; CHECK-32-NEXT: movl %esi, 8(%eax)
+; CHECK-32-NEXT: movl %edx, 4(%eax)
+; CHECK-32-NEXT: movl %ecx, (%eax)
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: popl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: popl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl $4
+;
+; CHECK-64-LABEL: test_copysignf128:
+; CHECK-64: # %bb.0: # %start
+; CHECK-64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-64-NEXT: orps %xmm1, %xmm0
+; CHECK-64-NEXT: retq
+; FIXME: calling long double rather than f128 function
+; FIXME: calling long double rather than f128 function
+start:
+  %0 = tail call fp128 @llvm.copysign.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.copysign.f128(fp128, fp128)
+
+
+define fp128 @test_cosf128(fp128 %a) {
+; CHECK-32-LABEL: test_cosf128:
+; CHECK-32: calll cosf128
+;
+; CHECK-64-LABEL: test_cosf128:
+; CHECK-64: jmp cosf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.cos.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.cos.f128(fp128)
+
+
+define fp128 @test_exp2f128(fp128 %a) {
+; CHECK-32-LABEL: test_exp2f128:
+; CHECK-32: calll exp2f128
+;
+; CHECK-64-LABEL: test_exp2f128:
+; CHECK-64: jmp exp2f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.exp2.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.exp2.f128(fp128)
+
+
+define fp128 @test___exp2f128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___exp2f128_finite:
+; CHECK-32: calll llvm.__exp2f128_finite.f128@PLT
+;
+; CHECK-64-LABEL: test___exp2f128_finite:
+; CHECK-64: jmp llvm.__exp2f128_finite.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.__exp2f128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__exp2f128_finite.f128(fp128)
+
+
+define fp128 @test_expf128(fp128 %a) {
+; CHECK-32-LABEL: test_expf128:
+; CHECK-32: calll expf128
+;
+; CHECK-64-LABEL: test_expf128:
+; CHECK-64: jmp expf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.exp.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.exp.f128(fp128)
+
+
+define fp128 @test___expf128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___expf128_finite:
+; CHECK-32: calll llvm.__expf128_finite.f128@PLT
+;
+; CHECK-64-LABEL: test___expf128_finite:
+; CHECK-64: jmp llvm.__expf128_finite.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.__expf128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__expf128_finite.f128(fp128)
+
+
+define fp128 @test_floorf128(fp128 %a) {
+; CHECK-32-LABEL: test_floorf128:
+; CHECK-32: calll floorf128
+;
+; CHECK-64-LABEL: test_floorf128:
+; CHECK-64: jmp floorf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.floor.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.floor.f128(fp128)
+
+
+define fp128 @test_fmaf128(fp128 %a, fp128 %b, fp128 %c) {
+; CHECK-32-LABEL: test_fmaf128:
+; CHECK-32: calll fmaf128
+;
+; CHECK-64-LABEL: test_fmaf128:
+; CHECK-64: jmp fmaf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
+
+
+define fp128 @test_fmaxf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_fmaxf128:
+; CHECK-32: calll llvm.fmax.f128@PLT
+;
+; CHECK-64-LABEL: test_fmaxf128:
+; CHECK-64: jmp llvm.fmax.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.fmax.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fmax.f128(fp128, fp128)
+
+
+define fp128 @test_fminf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_fminf128:
+; CHECK-32: calll llvm.fmin.f128@PLT
+;
+; CHECK-64-LABEL: test_fminf128:
+; CHECK-64: jmp llvm.fmin.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.fmin.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fmin.f128(fp128, fp128)
+
+
+define fp128 @test_fmodf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_fmodf128:
+; CHECK-32: calll llvm.fmod.f128@PLT
+;
+; CHECK-64-LABEL: test_fmodf128:
+; CHECK-64: jmp llvm.fmod.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.fmod.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fmod.f128(fp128, fp128)
+
+
+define { fp128, i32 } @test_frexpf128(fp128 %a) {
+; CHECK-32-LABEL: test_frexpf128:
+; CHECK-32: calll frexpf128
+;
+; CHECK-64-LABEL: test_frexpf128:
+; CHECK-64: callq frexpf128@PLT
+start:
+  %0 = tail call { fp128, i32 } @llvm.frexp.f128(fp128 %a)
+  ret { fp128, i32 } %0
+}
+
+declare { fp128, i32 } @llvm.frexp.f128(fp128)
+
+
+define fp128 @test_ldexpf128(fp128 %a, i32 %b) {
+; CHECK-32-LABEL: test_ldexpf128:
+; CHECK-32: calll ldexpf128
+;
+; CHECK-64-LABEL: test_ldexpf128:
+; CHECK-64: jmp ldexpf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.ldexp.f128(fp128 %a, i32 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.ldexp.f128(fp128, i32)
+
+
+define i64 @test_llrintf128(fp128 %a) {
+; CHECK-32-LABEL: test_llrintf128:
+; CHECK-32: calll llrintf128
+;
+; CHECK-64-LABEL: test_llrintf128:
+; CHECK-64: jmp llrintf128@PLT # TAILCALL
+start:
+  %0 = tail call i64 @llvm.llrint.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.llrint.f128(fp128)
+
+
+define i64 @test_llroundf128(fp128 %a) {
+; CHECK-32-LABEL: test_llroundf128:
+; CHECK-32: calll llroundf128
+;
+; CHECK-64-LABEL: test_llroundf128:
+; CHECK-64: jmp llroundf128@PLT # TAILCALL
+start:
+  %0 = tail call i64 @llvm.llround.i64.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.llround.i64.f128(fp128)
+
+
+define fp128 @test_log10f128(fp128 %a) {
+; CHECK-32-LABEL: test_log10f128:
+; CHECK-32: calll log10f128
+;
+; CHECK-64-LABEL: test_log10f128:
+; CHECK-64: jmp log10f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.log10.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.log10.f128(fp128)
+
+
+define fp128 @test___log10f128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___log10f128_finite:
+; CHECK-32: calll llvm.__log10f128_finite.f128@PLT
+;
+; CHECK-64-LABEL: test___log10f128_finite:
+; CHECK-64: jmp llvm.__log10f128_finite.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.__log10f128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__log10f128_finite.f128(fp128)
+
+
+define fp128 @test_log2f128(fp128 %a) {
+; CHECK-32-LABEL: test_log2f128:
+; CHECK-32: calll log2f128
+;
+; CHECK-64-LABEL: test_log2f128:
+; CHECK-64: jmp log2f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.log2.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.log2.f128(fp128)
+
+
+define fp128 @test___log2f128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___log2f128_finite:
+; CHECK-32: calll llvm.__log2f128_finite.f128@PLT
+;
+; CHECK-64-LABEL: test___log2f128_finite:
+; CHECK-64: jmp llvm.__log2f128_finite.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.__log2f128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__log2f128_finite.f128(fp128)
+
+
+define fp128 @test_logf128(fp128 %a) {
+; CHECK-32-LABEL: test_logf128:
+; CHECK-32: calll logf128
+;
+; CHECK-64-LABEL: test_logf128:
+; CHECK-64: jmp logf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.log.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.log.f128(fp128)
+
+
+define fp128 @test___logf128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___logf128_finite:
+; CHECK-32: calll llvm.__logf128_finite.f128@PLT
+;
+; CHECK-64-LABEL: test___logf128_finite:
+; CHECK-64: jmp llvm.__logf128_finite.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.__logf128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__logf128_finite.f128(fp128)
+
+
+define i64 @test_lrintf128(fp128 %a) {
+; CHECK-32-LABEL: test_lrintf128:
+; CHECK-32: calll lrintf128
+;
+; CHECK-64-LABEL: test_lrintf128:
+; CHECK-64: jmp lrintf128@PLT # TAILCALL
+start:
+  %0 = tail call i64 @llvm.lrint.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.lrint.f128(fp128)
+
+
+define i64 @test_lroundf128(fp128 %a) {
+; CHECK-32-LABEL: test_lroundf128:
+; CHECK-32: calll lroundf128
+;
+; CHECK-64-LABEL: test_lroundf128:
+; CHECK-64: jmp lroundf128@PLT # TAILCALL
+start:
+  %0 = tail call i64 @llvm.lround.i64.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.lround.i64.f128(fp128)
+
+
+define fp128 @test_nearbyintf128(fp128 %a) {
+; CHECK-32-LABEL: test_nearbyintf128:
+; CHECK-32: calll nearbyintf128
+;
+; CHECK-64-LABEL: test_nearbyintf128:
+; CHECK-64: jmp nearbyintf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.nearbyint.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.nearbyint.f128(fp128)
+
+
+define fp128 @test_powf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_powf128:
+; CHECK-32: calll powf128
+;
+; CHECK-64-LABEL: test_powf128:
+; CHECK-64: jmp powf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.pow.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.pow.f128(fp128, fp128)
+
+
+define fp128 @test___powf128_finite(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test___powf128_finite:
+; CHECK-32: calll llvm.__powf128_finite.f128@PLT
+;
+; CHECK-64-LABEL: test___powf128_finite:
+; CHECK-64: jmp llvm.__powf128_finite.f128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.__powf128_finite.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__powf128_finite.f128(fp128, fp128)
+
+
+define fp128 @test_rintf128(fp128 %a) {
+; CHECK-32-LABEL: test_rintf128:
+; CHECK-32: calll rintf128
+;
+; CHECK-64-LABEL: test_rintf128:
+; CHECK-64: jmp rintf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.rint.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.rint.f128(fp128)
+
+
+define fp128 @test_roundevenf128(fp128 %a) {
+; CHECK-32-LABEL: test_roundevenf128:
+; CHECK-32: calll roundevenf128
+;
+; CHECK-64-LABEL: test_roundevenf128:
+; CHECK-64: jmp roundevenf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.roundeven.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.roundeven.f128(fp128)
+
+
+define fp128 @test_roundf128(fp128 %a) {
+; CHECK-32-LABEL: test_roundf128:
+; CHECK-32: calll roundf128
+;
+; CHECK-64-LABEL: test_roundf128:
+; CHECK-64: jmp roundf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.round.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.round.f128(fp128)
+
+
+define fp128 @test_sinf128(fp128 %a) {
+; CHECK-32-LABEL: test_sinf128:
+; CHECK-32: calll sinf128
+;
+; CHECK-64-LABEL: test_sinf128:
+; CHECK-64: jmp sinf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.sin.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.sin.f128(fp128)
+
+
+define fp128 @test_sqrtf128(fp128 %a) {
+; CHECK-32-LABEL: test_sqrtf128:
+; CHECK-32: calll sqrtf128
+;
+; CHECK-64-LABEL: test_sqrtf128:
+; CHECK-64: jmp sqrtf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.sqrt.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.sqrt.f128(fp128)
+
+
+define fp128 @test_truncf128(fp128 %a) {
+; CHECK-32-LABEL: test_truncf128:
+; CHECK-32: calll truncf128
+;
+; CHECK-64-LABEL: test_truncf128:
+; CHECK-64: jmp truncf128@PLT # TAILCALL
+start:
+  %0 = tail call fp128 @llvm.trunc.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.trunc.f128(fp128)
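
For background on why editing only the strings in RuntimeLibcalls.def is enough to change the emitted symbols: the .def file is an X-macro list that LLVM re-includes with its own definition of HANDLE_LIBCALL to populate the RTLIB routine-name table, and that table is what legalization consults when it emits the libcalls checked in f128-arith.ll above. The C++ sketch below only illustrates that pattern; MINI_LIBCALLS and everything around it are made-up stand-ins for this note, not LLVM's actual sources.

    // Stand-in for RuntimeLibcalls.def; in LLVM the real list lives in
    // llvm/include/llvm/IR/RuntimeLibcalls.def and is re-included with
    // different HANDLE_LIBCALL definitions.
    #include <cstdio>

    #define MINI_LIBCALLS(HANDLE_LIBCALL)                                      \
      HANDLE_LIBCALL(SQRT_F64, "sqrt")                                         \
      HANDLE_LIBCALL(SQRT_F80, "sqrtl")                                        \
      HANDLE_LIBCALL(SQRT_F128, "sqrtf128") /* new f128 name from this patch */

    // Expansion 1: the enum of libcall IDs.
    enum Libcall {
    #define HANDLE_LIBCALL(code, name) code,
      MINI_LIBCALLS(HANDLE_LIBCALL)
    #undef HANDLE_LIBCALL
      NUM_LIBCALLS
    };

    // Expansion 2: the parallel table of routine names.
    static const char *const LibcallNames[NUM_LIBCALLS] = {
    #define HANDLE_LIBCALL(code, name) name,
      MINI_LIBCALLS(HANDLE_LIBCALL)
    #undef HANDLE_LIBCALL
    };

    int main() {
      // Asking for the SQRT_F128 routine now yields "sqrtf128", not "sqrtl".
      std::printf("SQRT_F128 -> %s\n", LibcallNames[SQRT_F128]);
      return 0;
    }

Because the name table is the only place these routine names are spelled out, swapping the string in each *_F128 entry is the whole change on the compiler side; the new test then pins down the resulting symbols for both the i686 and x86_64 triples.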