Index: test/CodeGen/X86/vector-mul.ll
===================================================================
--- test/CodeGen/X86/vector-mul.ll
+++ test/CodeGen/X86/vector-mul.ll
@@ -245,24 +245,34 @@
 define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_17:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $4, %xmm1
-; X86-NEXT:    paddq %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [17,0,17,0]
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_17:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $4, %xmm1
-; X64-NEXT:    paddq %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [17,17]
+; X64-NEXT:    movdqa %xmm0, %xmm2
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v2i64_17:
 ; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpsllq $4, %xmm0, %xmm1
-; X64-AVX-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [17,17]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
 ; X64-AVX-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 17, i64 17>
   ret <2 x i64> %1
@@ -271,25 +281,24 @@
 define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
 ; X86-LABEL: mul_v4i32_17:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    pslld $4, %xmm1
-; X86-NEXT:    paddd %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v4i32_17:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    pslld $4, %xmm1
-; X64-NEXT:    paddd %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-AVX-LABEL: mul_v4i32_17:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpslld $4, %xmm0, %xmm1
-; X64-AVX-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; X64-AVX-NEXT:    retq
+; X64-XOP-LABEL: mul_v4i32_17:
+; X64-XOP:       # BB#0:
+; X64-XOP-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT:    retq
+;
+; X64-AVX2-LABEL: mul_v4i32_17:
+; X64-AVX2:       # BB#0:
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [17,17,17,17]
+; X64-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    retq
   %1 = mul <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
   ret <4 x i32> %1
 }
@@ -297,24 +306,17 @@
 define <8 x i16> @mul_v8i16_17(<8 x i16> %a0) nounwind {
 ; X86-LABEL: mul_v8i16_17:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllw $4, %xmm1
-; X86-NEXT:    paddw %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v8i16_17:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllw $4, %xmm1
-; X64-NEXT:    paddw %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v8i16_17:
 ; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpsllw $4, %xmm0, %xmm1
-; X64-AVX-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %1 = mul <8 x i16> %a0, <i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17>
   ret <8 x i16> %1
@@ -323,33 +325,58 @@
 define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
 ; X86-LABEL: mul_v16i8_17:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllw $4, %xmm1
-; X86-NEXT:    pand {{\.LCPI.*}}, %xmm1
-; X86-NEXT:    paddb %xmm0, %xmm1
+; X86-NEXT:    pmovsxbw %xmm0, %xmm1
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
+; X86-NEXT:    pmullw %xmm2, %xmm1
+; X86-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X86-NEXT:    pand %xmm3, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT:    pmovsxbw %xmm0, %xmm0
+; X86-NEXT:    pmullw %xmm2, %xmm0
+; X86-NEXT:    pand %xmm3, %xmm0
+; X86-NEXT:    packuswb %xmm0, %xmm1
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v16i8_17:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllw $4, %xmm1
-; X64-NEXT:    pand {{.*}}(%rip), %xmm1
-; X64-NEXT:    paddb %xmm0, %xmm1
+; X64-NEXT:    pmovsxbw %xmm0, %xmm1
+; X64-NEXT:    movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
+; X64-NEXT:    pmullw %xmm2, %xmm1
+; X64-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-NEXT:    pand %xmm3, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT:    pmovsxbw %xmm0, %xmm0
+; X64-NEXT:    pmullw %xmm2, %xmm0
+; X64-NEXT:    pand %xmm3, %xmm0
+; X64-NEXT:    packuswb %xmm0, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v16i8_17:
 ; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm1
+; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
+; X64-XOP-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-XOP-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; X64-XOP-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm0
+; X64-XOP-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; X64-XOP-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
 ; X64-XOP-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: mul_v16i8_17:
 ; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllw $4, %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; X64-AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %1 = mul <16 x i8> %a0, <i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17>
   ret <16 x i8> %1
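
[Reviewer note, not part of the patch] The v16i8 change above exists because SSE has no byte multiply: each half of the vector is sign-extended to eight i16 lanes (pmovsxbw), multiplied with pmullw, masked back down to bytes, and repacked with packuswb. A minimal scalar model of one lane, assuming only that the low 8 bits of the product are kept (so sign- vs. zero-extension does not matter); mul_i8_lane is my name, not something in the tree:

    #include <stdint.h>
    #include <stdio.h>

    /* One lane of the widen / pmullw / pand / packuswb sequence; c stands
       in for the splatted constant (17 here, 31 in mul_v16i8_31 below). */
    static uint8_t mul_i8_lane(uint8_t a, uint8_t c) {
      int16_t w = (int8_t)a;        /* pmovsxbw: sign-extend byte to word */
      int16_t p = (int16_t)(w * c); /* pmullw against [c,c,c,c,c,c,c,c]   */
      return (uint8_t)(p & 0xff);   /* pand [255,...] + packuswb          */
    }

    int main(void) {
      /* Exhaustive check of one lane against plain modular arithmetic. */
      for (int a = 0; a < 256; a++)
        if (mul_i8_lane((uint8_t)a, 17) != (uint8_t)(a * 17)) {
          puts("mismatch");
          return 1;
        }
      puts("ok");
      return 0;
    }
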
@@ -362,37 +389,35 @@
 define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_17_65:
 ; X86:       # BB#0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [17,0,65,0]
 ; X86-NEXT:    movdqa %xmm0, %xmm2
-; X86-NEXT:    psllq $6, %xmm2
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $4, %xmm1
-; X86-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X86-NEXT:    paddq %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_17_65:
 ; X64:       # BB#0:
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [17,65]
 ; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllq $6, %xmm2
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $4, %xmm1
-; X64-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X64-NEXT:    paddq %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-XOP-LABEL: mul_v2i64_17_65:
-; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
-; X64-XOP-NEXT:    retq
-;
-; X64-AVX2-LABEL: mul_v2i64_17_65:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
-; X64-AVX2-NEXT:    retq
+; X64-AVX-LABEL: mul_v2i64_17_65:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [17,65]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 17, i64 65>
   ret <2 x i64> %1
 }
@@ -400,29 +425,18 @@
 define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
 ; X86-LABEL: mul_v4i32_5_17_33_65:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa {{.*#+}} xmm1 = [4,16,32,64]
-; X86-NEXT:    pmulld %xmm0, %xmm1
-; X86-NEXT:    paddd %xmm1, %xmm0
+; X86-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v4i32_5_17_33_65:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa {{.*#+}} xmm1 = [4,16,32,64]
-; X64-NEXT:    pmulld %xmm0, %xmm1
-; X64-NEXT:    paddd %xmm1, %xmm0
+; X64-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-XOP-LABEL: mul_v4i32_5_17_33_65:
-; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshld {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; X64-XOP-NEXT:    retq
-;
-; X64-AVX2-LABEL: mul_v4i32_5_17_33_65:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; X64-AVX2-NEXT:    retq
+; X64-AVX-LABEL: mul_v4i32_5_17_33_65:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
   %1 = mul <4 x i32> %a0, <i32 5, i32 17, i32 33, i32 65>
   ret <4 x i32> %1
 }
@@ -510,24 +524,34 @@
 define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_7:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $3, %xmm1
-; X86-NEXT:    psubq %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [7,0,7,0]
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_7:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $3, %xmm1
-; X64-NEXT:    psubq %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [7,7]
+; X64-NEXT:    movdqa %xmm0, %xmm2
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v2i64_7:
 ; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpsllq $3, %xmm0, %xmm1
-; X64-AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [7,7]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
 ; X64-AVX-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 7, i64 7>
   ret <2 x i64> %1
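
[Reviewer note, not part of the patch] All of the v2i64 hunks so far share one idea. pmuludq multiplies the low 32 bits of each 64-bit lane into a full 64-bit product, and every constant in these tests fits in 32 bits, so the low 64 bits of a*c reduce to lo32(a)*c + ((hi32(a)*c) << 32), which is exactly the new pmuludq / psrlq $32 / pmuludq / psllq $32 / paddq sequence. A scalar sketch (mul_lo64_by_u32 is my name):

    #include <stdint.h>
    #include <stdio.h>

    /* Low 64 bits of a 64x64 multiply when the constant fits in 32 bits. */
    static uint64_t mul_lo64_by_u32(uint64_t a, uint32_t c) {
      uint64_t lo = (uint64_t)(uint32_t)a * c; /* pmuludq %xmm1, %xmm2 */
      uint64_t hi = (a >> 32) * c;             /* psrlq $32 + pmuludq  */
      return lo + (hi << 32);                  /* psllq $32 + paddq    */
    }

    int main(void) {
      uint64_t a = 0x123456789abcdef0ULL;
      printf("%d\n", mul_lo64_by_u32(a, 17) == a * 17); /* prints 1 */
      return 0;
    }
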
@@ -536,25 +560,24 @@
 define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
 ; X86-LABEL: mul_v4i32_7:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    pslld $3, %xmm1
-; X86-NEXT:    psubd %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v4i32_7:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    pslld $3, %xmm1
-; X64-NEXT:    psubd %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-AVX-LABEL: mul_v4i32_7:
-; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpslld $3, %xmm0, %xmm1
-; X64-AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
-; X64-AVX-NEXT:    retq
+; X64-XOP-LABEL: mul_v4i32_7:
+; X64-XOP:       # BB#0:
+; X64-XOP-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT:    retq
+;
+; X64-AVX2-LABEL: mul_v4i32_7:
+; X64-AVX2:       # BB#0:
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
+; X64-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    retq
   %1 = mul <4 x i32> %a0, <i32 7, i32 7, i32 7, i32 7>
   ret <4 x i32> %1
 }
@@ -562,24 +585,17 @@
 define <8 x i16> @mul_v8i16_7(<8 x i16> %a0) nounwind {
 ; X86-LABEL: mul_v8i16_7:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllw $3, %xmm1
-; X86-NEXT:    psubw %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v8i16_7:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllw $3, %xmm1
-; X64-NEXT:    psubw %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mul_v8i16_7:
 ; X64-AVX:       # BB#0:
-; X64-AVX-NEXT:    vpsllw $3, %xmm0, %xmm1
-; X64-AVX-NEXT:    vpsubw %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %1 = mul <8 x i16> %a0, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   ret <8 x i16> %1
@@ -588,33 +604,58 @@
 define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
 ; X86-LABEL: mul_v16i8_31:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllw $5, %xmm1
-; X86-NEXT:    pand {{\.LCPI.*}}, %xmm1
-; X86-NEXT:    psubb %xmm0, %xmm1
+; X86-NEXT:    pmovsxbw %xmm0, %xmm1
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
+; X86-NEXT:    pmullw %xmm2, %xmm1
+; X86-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X86-NEXT:    pand %xmm3, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT:    pmovsxbw %xmm0, %xmm0
+; X86-NEXT:    pmullw %xmm2, %xmm0
+; X86-NEXT:    pand %xmm3, %xmm0
+; X86-NEXT:    packuswb %xmm0, %xmm1
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v16i8_31:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllw $5, %xmm1
-; X64-NEXT:    pand {{.*}}(%rip), %xmm1
-; X64-NEXT:    psubb %xmm0, %xmm1
+; X64-NEXT:    pmovsxbw %xmm0, %xmm1
+; X64-NEXT:    movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
+; X64-NEXT:    pmullw %xmm2, %xmm1
+; X64-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-NEXT:    pand %xmm3, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT:    pmovsxbw %xmm0, %xmm0
+; X64-NEXT:    pmullw %xmm2, %xmm0
+; X64-NEXT:    pand %xmm3, %xmm0
+; X64-NEXT:    packuswb %xmm0, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X64-XOP-LABEL: mul_v16i8_31:
 ; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpsubb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm1
+; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
+; X64-XOP-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-XOP-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; X64-XOP-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm0
+; X64-XOP-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; X64-XOP-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
 ; X64-XOP-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: mul_v16i8_31:
 ; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllw $5, %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; X64-AVX2-NEXT:    vpsubb %xmm0, %xmm1, %xmm0
+; X64-AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %1 = mul <16 x i8> %a0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
   ret <16 x i8> %1
@@ -627,37 +668,35 @@
 define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_15_63:
 ; X86:       # BB#0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [15,0,63,0]
 ; X86-NEXT:    movdqa %xmm0, %xmm2
-; X86-NEXT:    psllq $6, %xmm2
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $4, %xmm1
-; X86-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X86-NEXT:    psubq %xmm0, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_15_63:
 ; X64:       # BB#0:
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [15,63]
 ; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllq $6, %xmm2
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $4, %xmm1
-; X64-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X64-NEXT:    psubq %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-XOP-LABEL: mul_v2i64_15_63:
-; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; X64-XOP-NEXT:    retq
-;
-; X64-AVX2-LABEL: mul_v2i64_15_63:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; X64-AVX2-NEXT:    retq
+; X64-AVX-LABEL: mul_v2i64_15_63:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,63]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 15, i64 63>
   ret <2 x i64> %1
 }
@@ -665,35 +704,43 @@
 define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_neg_15_63:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $6, %xmm1
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [4294967281,4294967295,4294967233,4294967295]
 ; X86-NEXT:    movdqa %xmm0, %xmm2
-; X86-NEXT:    psllq $4, %xmm2
-; X86-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; X86-NEXT:    psubq %xmm2, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    movdqa %xmm0, %xmm3
+; X86-NEXT:    psrlq $32, %xmm3
+; X86-NEXT:    pmuludq %xmm1, %xmm3
+; X86-NEXT:    pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    paddq %xmm3, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_neg_15_63:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $6, %xmm1
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
 ; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllq $4, %xmm2
-; X64-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; X64-NEXT:    psubq %xmm2, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    movdqa %xmm0, %xmm3
+; X64-NEXT:    psrlq $32, %xmm3
+; X64-NEXT:    pmuludq %xmm1, %xmm3
+; X64-NEXT:    pmuludq {{.*}}(%rip), %xmm0
+; X64-NEXT:    paddq %xmm3, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-XOP-LABEL: mul_v2i64_neg_15_63:
-; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
-; X64-XOP-NEXT:    retq
-;
-; X64-AVX2-LABEL: mul_v2i64_neg_15_63:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    retq
+; X64-AVX-LABEL: mul_v2i64_neg_15_63:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm3
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT:    vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 -15, i64 -63>
   ret <2 x i64> %1
 }
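
[Reviewer note, not part of the patch] For the negative constants the high 32 bits of the constant are nonzero (all ones), so the lo32(a)*hi32(c) cross product no longer vanishes. That is what the extra register copy, the third pmuludq against a second constant-pool operand (presumably the constants' high halves; the CHECK pattern elides the actual value), and the extra paddq implement. A scalar sketch, with mul_lo64 my name:

    #include <stdint.h>
    #include <stdio.h>

    /* Full low-half decomposition for an arbitrary 64-bit constant c:
       lo64(a*c) = lo32(a)*lo32(c)
                 + ((hi32(a)*lo32(c) + lo32(a)*hi32(c)) << 32)          */
    static uint64_t mul_lo64(uint64_t a, uint64_t c) {
      uint64_t lo_lo = (uint64_t)(uint32_t)a * (uint32_t)c; /* pmuludq %xmm1       */
      uint64_t hi_lo = (a >> 32) * (uint32_t)c;             /* psrlq $32 + pmuludq */
      uint64_t lo_hi = (uint64_t)(uint32_t)a * (c >> 32);   /* pmuludq, 2nd pool   */
      return lo_lo + ((hi_lo + lo_hi) << 32);               /* paddq/psllq/paddq   */
    }

    int main(void) {
      uint64_t a = 0xfedcba9876543210ULL;
      uint64_t c = (uint64_t)-15;              /* 18446744073709551601 */
      printf("%d\n", mul_lo64(a, c) == a * c); /* prints 1 */
      return 0;
    }
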
@@ -701,43 +748,43 @@
 define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_neg_17_65:
 ; X86:       # BB#0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $6, %xmm1
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [4294967279,4294967295,4294967231,4294967295]
 ; X86-NEXT:    movdqa %xmm0, %xmm2
-; X86-NEXT:    psllq $4, %xmm2
-; X86-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; X86-NEXT:    paddq %xmm0, %xmm2
-; X86-NEXT:    pxor %xmm0, %xmm0
-; X86-NEXT:    psubq %xmm2, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    movdqa %xmm0, %xmm3
+; X86-NEXT:    psrlq $32, %xmm3
+; X86-NEXT:    pmuludq %xmm1, %xmm3
+; X86-NEXT:    pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    paddq %xmm3, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_neg_17_65:
 ; X64:       # BB#0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $6, %xmm1
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
 ; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllq $4, %xmm2
-; X64-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; X64-NEXT:    paddq %xmm0, %xmm2
-; X64-NEXT:    pxor %xmm0, %xmm0
-; X64-NEXT:    psubq %xmm2, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    movdqa %xmm0, %xmm3
+; X64-NEXT:    psrlq $32, %xmm3
+; X64-NEXT:    pmuludq %xmm1, %xmm3
+; X64-NEXT:    pmuludq {{.*}}(%rip), %xmm0
+; X64-NEXT:    paddq %xmm3, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-XOP-LABEL: mul_v2i64_neg_17_65:
-; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-XOP-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; X64-XOP-NEXT:    retq
-;
-; X64-AVX2-LABEL: mul_v2i64_neg_17_65:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-AVX2-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; X64-AVX2-NEXT:    retq
+; X64-AVX-LABEL: mul_v2i64_neg_17_65:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm3
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT:    vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 -17, i64 -65>
   ret <2 x i64> %1
 }
@@ -987,41 +1034,35 @@
 define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind{
 ; X86-LABEL: mul_v2i64_68_132:
 ; X86:       # BB#0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [68,0,132,0]
 ; X86-NEXT:    movdqa %xmm0, %xmm2
-; X86-NEXT:    psllq $5, %xmm2
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $4, %xmm1
-; X86-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X86-NEXT:    paddq %xmm0, %xmm1
-; X86-NEXT:    psllq $2, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_68_132:
 ; X64:       # BB#0:
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [68,132]
 ; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllq $5, %xmm2
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $4, %xmm1
-; X64-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X64-NEXT:    paddq %xmm0, %xmm1
-; X64-NEXT:    psllq $2, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-XOP-LABEL: mul_v2i64_68_132:
-; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
-; X64-XOP-NEXT:    vpsllq $2, %xmm0, %xmm0
-; X64-XOP-NEXT:    retq
-;
-; X64-AVX2-LABEL: mul_v2i64_68_132:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
-; X64-AVX2-NEXT:    vpsllq $2, %xmm0, %xmm0
-; X64-AVX2-NEXT:    retq
+; X64-AVX-LABEL: mul_v2i64_68_132:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [68,132]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT:    retq
   %mul = mul <2 x i64> %x, <i64 68, i64 132>
   ret <2 x i64> %mul
 }
@@ -1030,41 +1071,35 @@
 define <2 x i64> @mul_v2i16_60_120(<2 x i64> %x) nounwind {
 ; X86-LABEL: mul_v2i16_60_120:
 ; X86:       # BB#0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [60,0,124,0]
 ; X86-NEXT:    movdqa %xmm0, %xmm2
-; X86-NEXT:    psllq $5, %xmm2
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psllq $4, %xmm1
-; X86-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X86-NEXT:    psubq %xmm0, %xmm1
-; X86-NEXT:    psllq $2, %xmm1
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i16_60_120:
 ; X64:       # BB#0:
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [60,124]
 ; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllq $5, %xmm2
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psllq $4, %xmm1
-; X64-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X64-NEXT:    psubq %xmm0, %xmm1
-; X64-NEXT:    psllq $2, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
-; X64-XOP-LABEL: mul_v2i16_60_120:
-; X64-XOP:       # BB#0:
-; X64-XOP-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; X64-XOP-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; X64-XOP-NEXT:    vpsllq $2, %xmm0, %xmm0
-; X64-XOP-NEXT:    retq
-;
-; X64-AVX2-LABEL: mul_v2i16_60_120:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; X64-AVX2-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; X64-AVX2-NEXT:    vpsllq $2, %xmm0, %xmm0
-; X64-AVX2-NEXT:    retq
+; X64-AVX-LABEL: mul_v2i16_60_120:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [60,124]
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT:    retq
   %mul = mul <2 x i64> %x, <i64 60, i64 124>
   ret <2 x i64> %mul
 }
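
[Reviewer note, not part of the patch] Circling back to the v16i8 AVX2 path: the CHECK lines in mul_v16i8_17 / mul_v16i8_31 widen to a single ymm, multiply, and truncate with two vpshufb plus a vpunpcklqdq. For reference, a hedged intrinsics rendering (the function name is mine; I use zeroing control bytes, -1, where the generated shuffle mask leaves lanes undefined; build with -mavx2):

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    /* AVX2 model of the new mul_v16i8_17 CHECK lines. */
    static __m128i mul_v16i8_17_avx2(__m128i a) {
      __m256i w = _mm256_cvtepi8_epi16(a);               /* vpmovsxbw    */
      w = _mm256_mullo_epi16(w, _mm256_set1_epi16(17));  /* vpmullw      */
      __m128i lo = _mm256_castsi256_si128(w);
      __m128i hi = _mm256_extracti128_si256(w, 1);       /* vextracti128 */
      const __m128i m = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14,
                                      -1, -1, -1, -1, -1, -1, -1, -1);
      lo = _mm_shuffle_epi8(lo, m);                      /* vpshufb      */
      hi = _mm_shuffle_epi8(hi, m);
      return _mm_unpacklo_epi64(lo, hi);                 /* vpunpcklqdq  */
    }

    int main(void) {
      uint8_t in[16], out[16];
      for (int i = 0; i < 16; i++)
        in[i] = (uint8_t)(i * 13 + 100);
      __m128i r = mul_v16i8_17_avx2(_mm_loadu_si128((const __m128i *)in));
      _mm_storeu_si128((__m128i *)out, r);
      for (int i = 0; i < 16; i++)
        if (out[i] != (uint8_t)(in[i] * 17)) {
          puts("mismatch");
          return 1;
        }
      puts("ok");
      return 0;
    }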