Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -16740,10 +16740,8 @@
     // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
     Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
   } else {
-    SDValue S2F = DAG.getBitcast(MVT::v4i32, Sub);
-    SDValue Shuffle = DAG.getVectorShuffle(MVT::v4i32, dl, S2F, S2F, {2,3,0,1});
-    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
-                         DAG.getBitcast(MVT::v2f64, Shuffle), Sub);
+    SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
+    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
   }
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
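The change above replaces the bitcast-to-v4i32 + pshufd + bitcast-back sequence with a single native v2f64 shuffle, mask {1,-1} (element 1, then undef). Shuffle lowering is then free to pick an FP-domain instruction such as unpckhpd (or movhlps), avoiding the int/FP domain crossing that the pshufd round-trip forced. A minimal intrinsics sketch, my own illustration rather than anything in the patch, contrasting the two sequences:

    // Both helpers place Sub's high double in lane 0 so a following addpd/addsd
    // can sum the two halves; they differ only in which shuffle domain is used.
    #include <immintrin.h>

    // Old lowering: view the doubles as v4i32, pshufd {2,3,0,1}, view back.
    // pshufd is an integer-domain shuffle, so FP data can pay a bypass delay
    // on several microarchitectures.
    static __m128d swapHalvesPshufd(__m128d Sub) {
      __m128i Bits = _mm_castpd_si128(Sub);
      __m128i Swap = _mm_shuffle_epi32(Bits, _MM_SHUFFLE(1, 0, 3, 2)); // {2,3,0,1}
      return _mm_castsi128_pd(Swap);
    }

    // New lowering: the v2f64 shuffle {1,-1} only pins lane 0, so unpckhpd
    // (an FP-domain shuffle) is a valid selection.
    static __m128d swapHalvesUnpckhpd(__m128d Sub) {
      return _mm_unpackhi_pd(Sub, Sub);
    }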
Index: test/CodeGen/X86/ftrunc.ll
===================================================================
--- test/CodeGen/X86/ftrunc.ll
+++ test/CodeGen/X86/ftrunc.ll
@@ -41,7 +41,8 @@
 ; SSE2-NEXT: movq %rax, %xmm1
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
 ; SSE2-NEXT: subpd {{.*}}(%rip), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE2-NEXT: addpd %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
@@ -128,12 +129,14 @@
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
 ; SSE2-NEXT: subpd %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE2-NEXT: addpd %xmm1, %xmm0
 ; SSE2-NEXT: movq %rdx, %xmm1
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT: subpd %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT: movapd %xmm1, %xmm2
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
 ; SSE2-NEXT: addpd %xmm1, %xmm2
 ; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE2-NEXT: retq
@@ -194,23 +197,27 @@
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
 ; SSE2-NEXT: subpd %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE2-NEXT: addpd %xmm1, %xmm0
 ; SSE2-NEXT: movq %rdi, %xmm1
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT: subpd %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE2-NEXT: movapd %xmm1, %xmm4
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
 ; SSE2-NEXT: addpd %xmm1, %xmm4
 ; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; SSE2-NEXT: movq %rcx, %xmm4
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; SSE2-NEXT: subpd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1]
+; SSE2-NEXT: movapd %xmm4, %xmm1
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
 ; SSE2-NEXT: addpd %xmm4, %xmm1
 ; SSE2-NEXT: movq %rax, %xmm4
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; SSE2-NEXT: subpd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1]
+; SSE2-NEXT: movapd %xmm4, %xmm2
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
 ; SSE2-NEXT: addpd %xmm4, %xmm2
 ; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-NEXT: retq
@@ -356,7 +363,7 @@
   ret <4 x double> %r
 }
 
 ; The fold may be guarded to allow existing code to continue
 ; working based on its assumptions of float->int overflow.
 
 define float @trunc_unsigned_f32_disable_via_attr(float %x) #1 {
Index: test/CodeGen/X86/scalar-int-to-fp.ll
===================================================================
--- test/CodeGen/X86/scalar-int-to-fp.ll
+++ test/CodeGen/X86/scalar-int-to-fp.ll
@@ -636,7 +636,8 @@
 ; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
 ; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0
-; SSE2_32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2_32-NEXT: movapd %xmm0, %xmm1
+; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; SSE2_32-NEXT: addpd %xmm0, %xmm1
 ; SSE2_32-NEXT: movlpd %xmm1, (%esp)
 ; SSE2_32-NEXT: fldl (%esp)
@@ -649,7 +650,8 @@
 ; SSE2_64-NEXT: movq %rdi, %xmm1
 ; SSE2_64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
 ; SSE2_64-NEXT: subpd {{.*}}(%rip), %xmm1
-; SSE2_64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2_64-NEXT: movapd %xmm1, %xmm0
+; SSE2_64-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE2_64-NEXT: addpd %xmm1, %xmm0
 ; SSE2_64-NEXT: retq
 ;
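For readers of the CHECK lines: the constants are the SSE2 uint64->f64 bias trick. [1127219200,1160773632] is [0x43300000,0x45300000], the exponent words of 2^52 and 2^84, and the subpd constant [4.503600e+15,1.934281e+25] is [2^52, 2^84]. punpckldq builds the biased doubles 2^52 + lo32 and 2^84 + hi32*2^32, subpd removes the biases, and the shuffle+addpd pair being changed in this patch is the final horizontal add. A hedged scalar model (the function name is mine; assumes the default round-to-nearest mode):

    #include <cstdint>
    #include <cstring>

    static double u64ToF64ViaBias(uint64_t X) {
      // 0x433... is 2^52 with the low 32 bits of X in the mantissa;
      // 0x453... is 2^84 with the high 32 bits of X in the low mantissa words.
      uint64_t LoBits = (X & 0xffffffffu) | 0x4330000000000000ull;
      uint64_t HiBits = (X >> 32) | 0x4530000000000000ull;
      double Lo, Hi;
      std::memcpy(&Lo, &LoBits, sizeof(Lo));
      std::memcpy(&Hi, &HiBits, sizeof(Hi));
      // subpd by [2^52, 2^84] (both subtractions are exact), then the
      // horizontal add: only this final add can round, matching a native
      // u64 -> double conversion.
      return (Hi - 19342813113834066795298816.0)   // 2^84
           + (Lo - 4503599627370496.0);            // 2^52
    }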
Index: test/CodeGen/X86/vec_int_to_fp.ll
===================================================================
--- test/CodeGen/X86/vec_int_to_fp.ll
+++ test/CodeGen/X86/vec_int_to_fp.ll
@@ -412,18 +412,21 @@
 define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
 ; SSE-LABEL: uitofp_2i64_to_2f64:
 ; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
-; SSE-NEXT: subpd %xmm3, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE-NEXT: addpd %xmm4, %xmm0
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: subpd %xmm3, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
-; SSE-NEXT: addpd %xmm2, %xmm1
-; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25]
+; SSE-NEXT: subpd %xmm4, %xmm0
+; SSE-NEXT: movapd %xmm0, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-NEXT: addpd %xmm0, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT: subpd %xmm4, %xmm3
+; SSE-NEXT: movapd %xmm3, %xmm0
+; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
+; SSE-NEXT: addpd %xmm3, %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT: movapd %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
 ; VEX-LABEL: uitofp_2i64_to_2f64:
@@ -691,28 +694,34 @@
 define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
 ; SSE-LABEL: uitofp_4i64_to_4f64:
 ; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25]
-; SSE-NEXT: subpd %xmm4, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,0,1]
-; SSE-NEXT: addpd %xmm5, %xmm0
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE-NEXT: subpd %xmm4, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
-; SSE-NEXT: addpd %xmm3, %xmm5
-; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE-NEXT: subpd %xmm4, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,0,1]
-; SSE-NEXT: addpd %xmm5, %xmm1
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE-NEXT: subpd %xmm4, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; SSE-NEXT: addpd %xmm3, %xmm2
-; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1127219200,1160773632,0,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: movapd {{.*#+}} xmm5 = [4.503600e+15,1.934281e+25]
+; SSE-NEXT: subpd %xmm5, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE-NEXT: subpd %xmm5, %xmm4
+; SSE-NEXT: movapd %xmm4, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
+; SSE-NEXT: addpd %xmm4, %xmm2
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: subpd %xmm5, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE-NEXT: addpd %xmm1, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE-NEXT: subpd %xmm5, %xmm4
+; SSE-NEXT: movapd %xmm4, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
+; SSE-NEXT: addpd %xmm4, %xmm1
+; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE-NEXT: movapd %xmm2, %xmm1
 ; SSE-NEXT: retq
 ;
 ; VEX-LABEL: uitofp_4i64_to_4f64:
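Note the surviving pshufd in the new CHECK lines: it reorders the integer input (extracting the high u64 of each 128-bit half), so the integer-domain shuffle is still the right choice there; only shuffles of subpd results, which are FP data, switch to movapd+unpckhpd. A sketch of the whole uitofp_2i64_to_2f64 pattern with intrinsics, mine rather than the patch's, assuming SSE2 only:

    #include <emmintrin.h>

    static __m128d uitofp_v2i64_v2f64(__m128i V) {
      const __m128i ExpWords = _mm_setr_epi32(0x43300000, 0x45300000, 0, 0);
      const __m128d Bias = _mm_setr_pd(4503599627370496.0,            // 2^52
                                       19342813113834066795298816.0); // 2^84
      // pshufd {2,3,0,1}: move the high u64 down; integer data, integer shuffle.
      __m128i Swap = _mm_shuffle_epi32(V, _MM_SHUFFLE(1, 0, 3, 2));
      // punpckldq: interleave each u64's 32-bit halves with the exponent words,
      // forming the biased doubles [2^52 + lo32, 2^84 + hi32 * 2^32].
      __m128d A = _mm_sub_pd(_mm_castsi128_pd(_mm_unpacklo_epi32(V, ExpWords)), Bias);
      __m128d B = _mm_sub_pd(_mm_castsi128_pd(_mm_unpacklo_epi32(Swap, ExpWords)), Bias);
      // Horizontal add of each pair via the FP-domain unpckhpd (the new form).
      __m128d LoRes = _mm_add_pd(_mm_unpackhi_pd(A, A), A);
      __m128d HiRes = _mm_add_pd(_mm_unpackhi_pd(B, B), B);
      return _mm_unpacklo_pd(LoRes, HiRes); // [(double)V[0], (double)V[1]]
    }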
@@ -2797,11 +2806,13 @@
 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25]
 ; SSE-NEXT: subpd %xmm4, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE-NEXT: addpd %xmm1, %xmm0
 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
 ; SSE-NEXT: subpd %xmm4, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
+; SSE-NEXT: movapd %xmm3, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
 ; SSE-NEXT: addpd %xmm3, %xmm1
 ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT: retq
@@ -2963,21 +2974,25 @@
 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
 ; SSE-NEXT: movapd {{.*#+}} xmm5 = [4.503600e+15,1.934281e+25]
 ; SSE-NEXT: subpd %xmm5, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE-NEXT: addpd %xmm1, %xmm0
 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; SSE-NEXT: subpd %xmm5, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1]
+; SSE-NEXT: movapd %xmm4, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
 ; SSE-NEXT: addpd %xmm4, %xmm1
 ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; SSE-NEXT: subpd %xmm5, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
+; SSE-NEXT: movapd %xmm2, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
 ; SSE-NEXT: addpd %xmm2, %xmm1
 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; SSE-NEXT: subpd %xmm5, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1]
+; SSE-NEXT: movapd %xmm4, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
 ; SSE-NEXT: addpd %xmm4, %xmm2
 ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE-NEXT: retq
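As a sanity check, the uitofp_v2i64_v2f64 sketch above can be compared against the compiler's native conversion; both round exactly once, so the results should match bit-for-bit (assuming the default round-to-nearest environment). A small harness, again my own illustration:

    #include <cassert>
    #include <cstdint>
    #include <emmintrin.h>

    // Relies on uitofp_v2i64_v2f64 from the sketch after the uitofp hunks.

    int main() {
      const uint64_t Samples[] = {0, 1, 0xffffffffull, 0x100000000ull,
                                  0x8000000000000000ull, ~0ull};
      for (uint64_t A : Samples)
        for (uint64_t B : Samples) {
          __m128i In = _mm_set_epi64x((long long)B, (long long)A); // hi, lo
          double Out[2];
          _mm_storeu_pd(Out, uitofp_v2i64_v2f64(In));
          assert(Out[0] == (double)A && Out[1] == (double)B);
        }
      return 0;
    }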