diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13933,13 +13933,25 @@
   SDValue N1 = N->getOperand(1);
   if ((N1.getOpcode() == ISD::FP_EXTEND ||
        N1.getOpcode() == ISD::FP_ROUND)) {
+    EVT N1VT = N1->getValueType(0);
+    EVT N1Op0VT = N1->getOperand(0).getValueType();
+
+    // Always fold no-op FP casts.
+    if (N1VT == N1Op0VT)
+      return true;
+
     // Do not optimize out type conversion of f128 type yet.
     // For some targets like x86_64, configuration is changed to keep one f128
     // value in one SSE register, but instruction selection cannot handle
     // FCOPYSIGN on SSE registers yet.
-    EVT N1VT = N1->getValueType(0);
-    EVT N1Op0VT = N1->getOperand(0).getValueType();
-    return (N1VT == N1Op0VT || N1Op0VT != MVT::f128);
+    if (N1Op0VT == MVT::f128)
+      return false;
+
+    // Avoid mismatched vector operand types, for better instruction selection.
+    if (N1Op0VT.isVector())
+      return false;
+
+    return true;
   }
   return false;
 }
diff --git a/llvm/test/CodeGen/AArch64/vector-fcopysign.ll b/llvm/test/CodeGen/AArch64/vector-fcopysign.ll
--- a/llvm/test/CodeGen/AArch64/vector-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/vector-fcopysign.ll
@@ -17,9 +17,9 @@
 ; WidenVecRes mismatched
 define <1 x float> @test_copysign_v1f32_v1f64(<1 x float> %a, <1 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v1f32_v1f64:
-; CHECK-NEXT:    fcvt s1, d1
-; CHECK-NEXT:    movi.4s v2, #128, lsl #24
-; CHECK-NEXT:    bit.16b v0, v1, v2
+; CHECK-NEXT:    fcvtn v1.2s, v1.2d
+; CHECK-NEXT:    movi.2s v2, #128, lsl #24
+; CHECK-NEXT:    bit.8b v0, v1, v2
 ; CHECK-NEXT:    ret
   %tmp0 = fptrunc <1 x double> %b to <1 x float>
   %r = call <1 x float> @llvm.copysign.v1f32(<1 x float> %a, <1 x float> %tmp0)
@@ -33,7 +33,7 @@
 ; WidenVecOp #1
 define <1 x double> @test_copysign_v1f64_v1f32(<1 x double> %a, <1 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v1f64_v1f32:
-; CHECK-NEXT:    fcvt d1, s1
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
 ; CHECK-NEXT:    movi.2d v2, #0000000000000000
 ; CHECK-NEXT:    fneg.2d v2, v2
 ; CHECK-NEXT:    bit.16b v0, v1, v2
@@ -93,23 +93,10 @@
 ; SplitVecOp #1
 define <4 x float> @test_copysign_v4f32_v4f64(<4 x float> %a, <4 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f64:
-; CHECK-NEXT:    mov s3, v0[1]
-; CHECK-NEXT:    movi.4s v4, #128, lsl #24
-; CHECK-NEXT:    fcvt s5, d1
-; CHECK-NEXT:    mov s6, v0[2]
-; CHECK-NEXT:    mov s7, v0[3]
-; CHECK-NEXT:    bit.16b v0, v5, v4
-; CHECK-NEXT:    fcvt s5, d2
-; CHECK-NEXT:    bit.16b v6, v5, v4
-; CHECK-NEXT:    mov d1, v1[1]
-; CHECK-NEXT:    fcvt s1, d1
-; CHECK-NEXT:    bit.16b v3, v1, v4
-; CHECK-NEXT:    mov d1, v2[1]
-; CHECK-NEXT:    fcvt s1, d1
-; CHECK-NEXT:    mov.s v0[1], v3[0]
-; CHECK-NEXT:    mov.s v0[2], v6[0]
-; CHECK-NEXT:    bit.16b v7, v1, v4
-; CHECK-NEXT:    mov.s v0[3], v7[0]
+; CHECK-NEXT:    fcvtn v1.2s, v1.2d
+; CHECK-NEXT:    fcvtn2 v1.4s, v2.2d
+; CHECK-NEXT:    movi.4s v2, #128, lsl #24
+; CHECK-NEXT:    bit.16b v0, v1, v2
 ; CHECK-NEXT:    ret
   %tmp0 = fptrunc <4 x double> %b to <4 x float>
   %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0)
@@ -122,9 +109,9 @@
 
 define <2 x double> @test_copysign_v2f64_v232(<2 x double> %a, <2 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v232:
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
 ; CHECK-NEXT:    movi.2d v2, #0000000000000000
 ; CHECK-NEXT:    fneg.2d v2, v2
-; CHECK-NEXT:    fcvtl v1.2d, v1.2s
 ; CHECK-NEXT:    bit.16b v0, v1, v2
 ; CHECK-NEXT:    ret
   %tmp0 = fpext <2 x float> %b to <2 x double>
@@ -149,12 +136,12 @@
 ; SplitVecRes mismatched
 define <4 x double> @test_copysign_v4f64_v4f32(<4 x double> %a, <4 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v4f64_v4f32:
-; CHECK-NEXT:    movi.2d v3, #0000000000000000
-; CHECK-NEXT:    fcvtl2 v4.2d, v2.4s
-; CHECK-NEXT:    fcvtl v2.2d, v2.2s
-; CHECK-NEXT:    fneg.2d v3, v3
-; CHECK-NEXT:    bit.16b v1, v4, v3
-; CHECK-NEXT:    bit.16b v0, v2, v3
+; CHECK-NEXT:    fcvtl v3.2d, v2.2s
+; CHECK-NEXT:    fcvtl2 v2.2d, v2.4s
+; CHECK-NEXT:    movi.2d v4, #0000000000000000
+; CHECK-NEXT:    fneg.2d v4, v4
+; CHECK-NEXT:    bit.16b v1, v2, v4
+; CHECK-NEXT:    bit.16b v0, v3, v4
 ; CHECK-NEXT:    ret
   %tmp0 = fpext <4 x float> %b to <4 x double>
   %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %tmp0)
diff --git a/llvm/test/CodeGen/X86/combine-fcopysign.ll b/llvm/test/CodeGen/X86/combine-fcopysign.ll
--- a/llvm/test/CodeGen/X86/combine-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/combine-fcopysign.ll
@@ -194,43 +194,24 @@
 define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fpext_sgn:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps %xmm2, %xmm3
-; SSE-NEXT:    movshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE-NEXT:    cvtss2sd %xmm2, %xmm5
+; SSE-NEXT:    cvtps2pd %xmm2, %xmm3
 ; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm6 = [NaN,NaN]
-; SSE-NEXT:    cvtss2sd %xmm3, %xmm3
-; SSE-NEXT:    movaps %xmm6, %xmm7
-; SSE-NEXT:    andnps %xmm3, %xmm7
-; SSE-NEXT:    movaps %xmm1, %xmm3
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
-; SSE-NEXT:    andps %xmm6, %xmm3
-; SSE-NEXT:    orps %xmm3, %xmm7
-; SSE-NEXT:    andps %xmm6, %xmm1
-; SSE-NEXT:    cvtss2sd %xmm2, %xmm2
-; SSE-NEXT:    movaps %xmm6, %xmm3
-; SSE-NEXT:    andnps %xmm2, %xmm3
-; SSE-NEXT:    orps %xmm3, %xmm1
-; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm7[0]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-NEXT:    andps %xmm6, %xmm2
-; SSE-NEXT:    xorps %xmm3, %xmm3
-; SSE-NEXT:    cvtss2sd %xmm4, %xmm3
-; SSE-NEXT:    andps %xmm6, %xmm0
-; SSE-NEXT:    andnps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm2, %xmm6
-; SSE-NEXT:    andps {{.*}}(%rip), %xmm5
+; SSE-NEXT:    cvtps2pd %xmm2, %xmm2
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [NaN,NaN]
+; SSE-NEXT:    andps %xmm4, %xmm0
+; SSE-NEXT:    movaps %xmm4, %xmm5
+; SSE-NEXT:    andnps %xmm3, %xmm5
 ; SSE-NEXT:    orps %xmm5, %xmm0
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; SSE-NEXT:    andps %xmm4, %xmm1
+; SSE-NEXT:    andnps %xmm2, %xmm4
+; SSE-NEXT:    orps %xmm4, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fpext_sgn:
 ; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtps2pd %xmm1, %ymm1
 ; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX-NEXT:    vcvtps2pd %xmm1, %ymm1
 ; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    vorps %ymm1, %ymm0, %ymm0
@@ -244,45 +225,19 @@
 define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x double> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [NaN,NaN,NaN,NaN]
-; SSE-NEXT:    andps %xmm4, %xmm3
-; SSE-NEXT:    cvtsd2ss %xmm2, %xmm5
-; SSE-NEXT:    movaps %xmm4, %xmm6
-; SSE-NEXT:    andnps %xmm5, %xmm6
-; SSE-NEXT:    orps %xmm3, %xmm6
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    andps %xmm4, %xmm3
-; SSE-NEXT:    xorps %xmm5, %xmm5
-; SSE-NEXT:    cvtsd2ss %xmm1, %xmm5
-; SSE-NEXT:    movaps %xmm4, %xmm7
-; SSE-NEXT:    andnps %xmm5, %xmm7
-; SSE-NEXT:    orps %xmm7, %xmm3
-; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
-; SSE-NEXT:    cvtsd2ss %xmm1, %xmm1
-; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
-; SSE-NEXT:    orps %xmm5, %xmm1
-; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm6[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE-NEXT:    andps %xmm4, %xmm0
-; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
-; SSE-NEXT:    xorps %xmm1, %xmm1
-; SSE-NEXT:    cvtsd2ss %xmm2, %xmm1
-; SSE-NEXT:    andnps %xmm1, %xmm4
-; SSE-NEXT:    orps %xmm0, %xmm4
-; SSE-NEXT:    insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    cvtpd2ps %xmm2, %xmm2
+; SSE-NEXT:    cvtpd2ps %xmm1, %xmm1
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT:    andpd {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andpd {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orpd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn:
 ; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtpd2ps %ymm1, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandpd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vcvtpd2ps %ymm1, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
 ; AVX-NEXT:    vandpd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vorpd %xmm1, %xmm0, %xmm0
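
For reference, a minimal IR reproducer of the pattern the combine now declines to fold (a sketch mirroring the tests above; the function name is illustrative):

define <2 x double> @copysign_fpext_sketch(<2 x double> %a, <2 x float> %b) {
  ; The fpext feeds the sign operand of copysign. Because v2f32 and v2f64 are
  ; mismatched vector types, CanCombineFCOPYSIGN_EXTEND_ROUND now returns false,
  ; the cast is kept, and AArch64 isel can emit fcvtl + bit as in the tests.
  %t = fpext <2 x float> %b to <2 x double>
  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %t)
  ret <2 x double> %r
}
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)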