diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar.ll b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
--- a/llvm/test/CodeGen/X86/fp-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
@@ -16,7 +16,7 @@
 declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
 declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
 declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
-declare float @llvm.experimental.constrained.fptrunc.f64.f32(double, metadata, metadata)
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
 declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
 declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)

@@ -480,7 +480,7 @@
 ; X87-NEXT:    popl %eax
 ; X87-NEXT:    retl
   %1 = load double, double* %val, align 8
-  %res = call float @llvm.experimental.constrained.fptrunc.f64.f32(double %1,
+  %res = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %1,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
   store float %res, float* %ret, align 4
diff --git a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
--- a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
@@ -9,8 +9,8 @@
 declare x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f32(float, metadata)
 declare x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f64(double, metadata)
 declare x86_fp80 @llvm.experimental.constrained.sqrt.x86_fp80(x86_fp80, metadata, metadata)
-declare float @llvm.experimental.constrained.fptrunc.x86_fp80.f32(x86_fp80, metadata, metadata)
-declare double @llvm.experimental.constrained.fptrunc.x86_fp80.f64(x86_fp80, metadata, metadata)
+declare float @llvm.experimental.constrained.fptrunc.f32.x86_fp80(x86_fp80, metadata, metadata)
+declare double @llvm.experimental.constrained.fptrunc.f64.x86_fp80(x86_fp80, metadata, metadata)

 define x86_fp80 @fadd_fp80(x86_fp80 %a, x86_fp80 %b) nounwind strictfp {
 ; X86-LABEL: fadd_fp80:
@@ -106,7 +106,6 @@
   %ret = call x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f32(float %a,
                                                                          metadata !"fpexcept.strict") #0
   ret x86_fp80 %ret
-
 }

 define x86_fp80 @fpext_f64_to_fp80(double %a) nounwind strictfp {
@@ -123,7 +122,6 @@
   %ret = call x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f64(double %a,
                                                                          metadata !"fpexcept.strict") #0
   ret x86_fp80 %ret
-
 }

 define float @fptrunc_fp80_to_f32(x86_fp80 %a) nounwind strictfp {
@@ -142,11 +140,10 @@
 ; X64-NEXT:    fstps -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    retq
-  %ret = call float @llvm.experimental.constrained.fptrunc.x86_fp80.f32(x86_fp80 %a,
+  %ret = call float @llvm.experimental.constrained.fptrunc.f32.x86_fp80(x86_fp80 %a,
                                                                         metadata !"round.dynamic",
                                                                         metadata !"fpexcept.strict") #0
   ret float %ret
-
 }

 define double @fptrunc_fp80_to_f64(x86_fp80 %a) nounwind strictfp {
@@ -169,11 +166,10 @@
 ; X64-NEXT:    fstpl -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    retq
-  %ret = call double @llvm.experimental.constrained.fptrunc.x86_fp80.f64(x86_fp80 %a,
+  %ret = call double @llvm.experimental.constrained.fptrunc.f64.x86_fp80(x86_fp80 %a,
                                                                          metadata !"round.dynamic",
                                                                          metadata !"fpexcept.strict") #0
   ret double %ret
-
 }

 define x86_fp80 @fsqrt_fp80(x86_fp80 %a) nounwind strictfp {
@@ -192,7 +188,6 @@
                                                                          metadata !"round.dynamic",
                                                                          metadata !"fpexcept.strict") #0
   ret x86_fp80 %ret
-
 }

 attributes #0 = { strictfp }
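
For reference, LLVM mangles overloaded constrained-intrinsic names with the return type before the operand type, so a strict f64-to-f32 truncation is spelled llvm.experimental.constrained.fptrunc.f32.f64, which is the order the renames above adopt. A minimal IR sketch of a caller using the corrected name (the function @trunc_example is hypothetical, not taken from these tests):

declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)

define float @trunc_example(double %x) strictfp {
  ; truncate f64 -> f32 with dynamic rounding and strict exception semantics
  %r = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %x,
                                                                 metadata !"round.dynamic",
                                                                 metadata !"fpexcept.strict") strictfp
  ret float %r
}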