Index: lib/Target/X86/X86FastISel.cpp
===================================================================
--- lib/Target/X86/X86FastISel.cpp
+++ lib/Target/X86/X86FastISel.cpp
@@ -434,6 +434,7 @@
                                    X86AddressMode &AM,
                                    MachineMemOperand *MMO, bool Aligned) {
   bool HasSSE2 = Subtarget->hasSSE2();
+  bool HasSSE4A = Subtarget->hasSSE4A();
   bool HasAVX = Subtarget->hasAVX();
   bool IsNonTemporal = MMO && MMO->isNonTemporal();
 
@@ -461,12 +462,22 @@
     Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
     break;
   case MVT::f32:
-    Opc = X86ScalarSSEf32 ?
-          (HasAVX ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
+    if (X86ScalarSSEf32) {
+      if (IsNonTemporal && HasSSE4A)
+        Opc = X86::MOVNTSS;
+      else
+        Opc = HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
+    } else
+      Opc = X86::ST_Fp32m;
     break;
   case MVT::f64:
-    Opc = X86ScalarSSEf64 ?
-          (HasAVX ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
+    if (X86ScalarSSEf64) {
+      if (IsNonTemporal && HasSSE4A)
+        Opc = X86::MOVNTSD;
+      else
+        Opc = HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
+    } else
+      Opc = X86::ST_Fp64m;
     break;
   case MVT::v4f32:
     if (Aligned) {
@@ -1937,7 +1948,7 @@
 
   const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
   unsigned ResultReg;
-  
+
   if (Subtarget->hasAVX()) {
     const TargetRegisterClass *FR32 = &X86::FR32RegClass;
     const TargetRegisterClass *VR128 = &X86::VR128RegClass;
@@ -1951,7 +1962,7 @@
         (RetVT.SimpleTy == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
     unsigned BlendOpcode =
         (RetVT.SimpleTy == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
-    
+
     unsigned CmpReg = fastEmitInst_rri(CmpOpcode, FR32, CmpLHSReg, CmpLHSIsKill,
                                        CmpRHSReg, CmpRHSIsKill, CC);
     unsigned VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
@@ -2289,7 +2300,7 @@
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(X86::VMOVPDI2DIrr), ResultReg)
         .addReg(InputReg, RegState::Kill);
-    
+
     // The result value is in the lower 16-bits of ResultReg.
     unsigned RegIdx = X86::sub_16bit;
     ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
@@ -3270,7 +3281,7 @@
   unsigned Reg = getRegForValue(I->getOperand(0));
   if (Reg == 0)
     return false;
-  
+
   // No instruction is needed for conversion. Reuse the register used by
   // the fist operand.
   updateValueMap(I, Reg);
Index: test/CodeGen/X86/fast-isel-nontemporal.ll
===================================================================
--- test/CodeGen/X86/fast-isel-nontemporal.ll
+++ test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4a -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE4A
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX
 
 define void @test_nti32(i32* nocapture %ptr, i32 %X) {
@@ -21,12 +22,52 @@
   ret void
 }
 
-define void @test_nt4xfloat(<4 x float>* nocapture %ptr, <4 x float> %X) {
-; SSE2-LABEL: test_nt4xfloat:
+define void @test_ntfloat(float* nocapture %ptr, float %X) {
+; SSE2-LABEL: test_ntfloat:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    movss %xmm0, (%rdi)
+; SSE2-NEXT:    retq
+;
+; SSE4A-LABEL: test_ntfloat:
+; SSE4A:       # BB#0: # %entry
+; SSE4A-NEXT:    movntss %xmm0, (%rdi)
+; SSE4A-NEXT:    retq
+;
+; AVX-LABEL: test_ntfloat:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-NEXT:    retq
+entry:
+  store float %X, float* %ptr, align 4, !nontemporal !1
+  ret void
+}
+
+define void @test_ntdouble(double* nocapture %ptr, double %X) {
+; SSE2-LABEL: test_ntdouble:
 ; SSE2:       # BB#0: # %entry
-; SSE2-NEXT:    movntps %xmm0, (%rdi)
+; SSE2-NEXT:    movsd %xmm0, (%rdi)
 ; SSE2-NEXT:    retq
 ;
+; SSE4A-LABEL: test_ntdouble:
+; SSE4A:       # BB#0: # %entry
+; SSE4A-NEXT:    movntsd %xmm0, (%rdi)
+; SSE4A-NEXT:    retq
+;
+; AVX-LABEL: test_ntdouble:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vmovsd %xmm0, (%rdi)
+; AVX-NEXT:    retq
+entry:
+  store double %X, double* %ptr, align 8, !nontemporal !1
+  ret void
+}
+
+define void @test_nt4xfloat(<4 x float>* nocapture %ptr, <4 x float> %X) {
+; SSE-LABEL: test_nt4xfloat:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    movntps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: test_nt4xfloat:
 ; AVX:       # BB#0: # %entry
 ; AVX-NEXT:    vmovntps %xmm0, (%rdi)
@@ -37,10 +78,10 @@
 }
 
 define void @test_nt2xdouble(<2 x double>* nocapture %ptr, <2 x double> %X) {
-; SSE2-LABEL: test_nt2xdouble:
-; SSE2:       # BB#0: # %entry
-; SSE2-NEXT:    movntpd %xmm0, (%rdi)
-; SSE2-NEXT:    retq
+; SSE-LABEL: test_nt2xdouble:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    movntpd %xmm0, (%rdi)
+; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt2xdouble:
 ; AVX:       # BB#0: # %entry
@@ -52,10 +93,10 @@
 }
 
 define void @test_nt2xi64(<2 x i64>* nocapture %ptr, <2 x i64> %X) {
-; SSE2-LABEL: test_nt2xi64:
-; SSE2:       # BB#0: # %entry
-; SSE2-NEXT:    movntdq %xmm0, (%rdi)
-; SSE2-NEXT:    retq
+; SSE-LABEL: test_nt2xi64:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    movntdq %xmm0, (%rdi)
+; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt2xi64:
 ; AVX:       # BB#0: # %entry
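For reference, the scalar non-temporal stores this patch selects (MOVNTSS/MOVNTSD) are the ones user code usually reaches through the SSE4A `_mm_stream_ss`/`_mm_stream_sd` intrinsics. A minimal sketch follows, assuming a Clang or GCC toolchain with `-msse4a`; note that the intrinsics lower through target builtins rather than through the `!nontemporal` store metadata exercised by the tests above, so this only illustrates the instructions themselves, not the fast-isel path.

```cpp
// Illustrative sketch only: scalar non-temporal stores via SSE4A intrinsics.
// Build with: clang++ -msse4a stream.cpp (requires an SSE4A-capable target).
#include <x86intrin.h>

void stream_float(float *p, float x) {
  // Emits movntss: stores one float, bypassing the cache hierarchy.
  _mm_stream_ss(p, _mm_set_ss(x));
}

void stream_double(double *p, double x) {
  // Emits movntsd: stores one double, bypassing the cache hierarchy.
  _mm_stream_sd(p, _mm_set_sd(x));
}
```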