Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -21882,6 +21882,11 @@
   }
 
   assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
+
+  // On Windows, the default precision control on x87 is only 53-bit.
+  if (Subtarget.isOSWindows())
+    return SDValue();
+
   SDValue ValueToStore = Src;
   if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
     // Bitcasting to f64 here allows us to do a single 64-bit store from
Index: llvm/test/CodeGen/X86/uint64-to-float.ll
===================================================================
--- llvm/test/CodeGen/X86/uint64-to-float.ll
+++ llvm/test/CodeGen/X86/uint64-to-float.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-apple-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-win32 -mattr=+sse2 | FileCheck %s --check-prefix=X86-WIN32
 
 ; Verify that we are using the efficient uitofp --> sitofp lowering illustrated
 ; by the compiler_rt implementation of __floatundisf.
@@ -42,6 +43,20 @@
 ; X64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; X64-NEXT:    addss %xmm0, %xmm0
 ; X64-NEXT:    retq
+;
+; X86-WIN32-LABEL: test:
+; X86-WIN32:       # %bb.0: # %entry
+; X86-WIN32-NEXT:    pushl %ebp
+; X86-WIN32-NEXT:    movl %esp, %ebp
+; X86-WIN32-NEXT:    andl $-8, %esp
+; X86-WIN32-NEXT:    subl $8, %esp
+; X86-WIN32-NEXT:    pushl 12(%ebp)
+; X86-WIN32-NEXT:    pushl 8(%ebp)
+; X86-WIN32-NEXT:    calll ___floatundisf
+; X86-WIN32-NEXT:    addl $8, %esp
+; X86-WIN32-NEXT:    movl %ebp, %esp
+; X86-WIN32-NEXT:    popl %ebp
+; X86-WIN32-NEXT:    retl
 entry:
   %b = uitofp i64 %a to float
   ret float %b
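
Note (not part of the patch): a minimal standalone sketch of the double-rounding hazard that motivates the early return above. With the x87 precision control at its Windows default of 53 bits, the i64 loaded by FILD is rounded to double precision before the final rounding to float, which can disagree with a single correctly rounded conversion. The sketch reproduces the same numeric effect portably via an explicit double intermediate rather than the x87 path itself; the constant is chosen purely for illustration so that the two rounding paths differ.

// Sketch only: shows the 53-bit double-rounding effect, not the x87
// code path itself.
#include <cstdint>
#include <cstdio>

int main() {
  // 2^60 + 2^36 + 1: one past the float rounding tie at 2^60 (half a
  // float ULP at this magnitude is 2^36).
  const uint64_t V = 0x1000001000000001ULL;

  // Single rounding: strictly above the tie, so the result rounds up
  // to 2^60 + 2^37.
  float Direct = static_cast<float>(V);

  // Double rounding via 53 bits: the trailing +1 is below half a
  // double ULP (2^7 at this magnitude) and is dropped, leaving an
  // exact tie that then rounds to even, i.e. down to 2^60.
  float ViaDouble = static_cast<float>(static_cast<double>(V));

  std::printf("single rounding: %a\n", Direct);    // 0x1.000002p+60
  std::printf("double rounding: %a\n", ViaDouble); // 0x1p+60
}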