diff --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td --- a/llvm/lib/Target/X86/X86InstrMMX.td +++ b/llvm/lib/Target/X86/X86InstrMMX.td @@ -508,16 +508,16 @@ // -- Conversion Instructions defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi, f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}", - WriteCvtPS2I, SSEPackedSingle>, PS; + WriteCvtPS2I, SSEPackedSingle>, PS, SIMD_EXC; defm MMX_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi, f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}", - WriteCvtPD2I, SSEPackedDouble>, PD; + WriteCvtPD2I, SSEPackedDouble>, PD, SIMD_EXC; defm MMX_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi, f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}", - WriteCvtPS2I, SSEPackedSingle>, PS; + WriteCvtPS2I, SSEPackedSingle>, PS, SIMD_EXC; defm MMX_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi, f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}", - WriteCvtPD2I, SSEPackedDouble>, PD; + WriteCvtPD2I, SSEPackedDouble>, PD, SIMD_EXC; defm MMX_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd, i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}", WriteCvtI2PD, SSEPackedDouble>, PD; @@ -525,7 +525,7 @@ defm MMX_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128, int_x86_sse_cvtpi2ps, i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}", - SSEPackedSingle>, PS; + SSEPackedSingle>, PS, SIMD_EXC; } // Extract / Insert diff --git a/llvm/test/CodeGen/X86/mmx-reg-usage.ll b/llvm/test/CodeGen/X86/mmx-reg-usage.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/mmx-reg-usage.ll @@ -0,0 +1,24 @@ +; RUN: llc -march=x86-64 -mattr=+mmx -stop-after finalize-isel -o - %s | FileCheck %s +; This test ensures that the MXCSR is implicitly used by MMX FP instructions. 
+
+define x86_mmx @stack_fold_cvtps2pi(<4 x float> %a0) {
+  %1 = call x86_mmx @llvm.x86.sse.cvtps2pi(<4 x float> %a0)
+  %2 = call <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float> %a0, x86_mmx %1)
+  %3 = call x86_mmx @llvm.x86.sse.cvttps2pi(<4 x float> %2)
+  %4 = call <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx %3)
+  %5 = call x86_mmx @llvm.x86.sse.cvtpd2pi(<2 x double> %4)
+  ret x86_mmx %5
+}
+
+declare x86_mmx @llvm.x86.sse.cvtps2pi(<4 x float>)
+declare <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float>, x86_mmx)
+declare x86_mmx @llvm.x86.sse.cvttps2pi(<4 x float>)
+declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx)
+declare x86_mmx @llvm.x86.sse.cvtpd2pi(<2 x double>)
+
+; CHECK: MMX_CVTPS2PIirr %0, implicit $mxcsr
+; CHECK: MMX_CVTPI2PSirr %0, killed %1, implicit $mxcsr
+; CHECK: MMX_CVTTPS2PIirr killed %2, implicit $mxcsr
+; CHECK: MMX_CVTPI2PDirr killed %3{{$}}
+; CHECK: MMX_CVTPD2PIirr killed %4, implicit $mxcsr
+