Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -27091,6 +27091,18 @@
     return Tmp;
   }
 
+  case X86ISD::PACKSS: {
+    // PACKSS is just a truncation if the sign bits extend to the packed size.
+    // TODO: Add DemandedElts support.
+    unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
+    unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
+    unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth + 1);
+    unsigned Tmp = std::min(Tmp0, Tmp1);
+    if (Tmp > (SrcBits - VTBits))
+      return Tmp - (SrcBits - VTBits);
+    return 1;
+  }
+
   case X86ISD::VSHLI: {
     SDValue Src = Op.getOperand(0);
     unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
Index: llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-512.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -1,1690 +1,1678 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
-
-define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
-; SSE-LABEL: v8i64:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT: pcmpgtq %xmm7, %xmm3
-; SSE-NEXT: pcmpgtq %xmm6, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
-; SSE-NEXT: pslld $31, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE-NEXT: pshufb %xmm3, %xmm2
-; SSE-NEXT: pcmpgtq %xmm5, %xmm1
-; SSE-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: pshufb %xmm3, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE-NEXT: psllw $15, %xmm0
-; SSE-NEXT: psraw $15, %xmm0
-; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2]
-; SSE-NEXT: pslld $31, %xmm9
-; SSE-NEXT: psrad $31, %xmm9
-; SSE-NEXT: pshufb %xmm3, %xmm9
-; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2]
-; SSE-NEXT: pslld $31, %xmm8
-; SSE-NEXT: psrad $31, %xmm8
-; SSE-NEXT: pshufb %xmm3, %xmm8
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
-; SSE-NEXT: psllw $15, %xmm8
-; SSE-NEXT: psraw $15, %xmm8
-; SSE-NEXT: pand %xmm0, %xmm8
-; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %AL %AL %EAX
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: v8i64:
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
-;
AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 -; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8 -; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm9 -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0] -; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 -; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1 -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2 -; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm2 -; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm1 -; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3 -; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm3 -; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpshufb %xmm8, %xmm2, %xmm2 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] -; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: %AL %AL %EAX -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: v8i64: -; AVX2: # BB#0: -; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0 -; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0 -; AVX2-NEXT: vpcmpgtq %ymm7, %ymm5, %ymm1 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 -; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtq %ymm6, %ymm4, %ymm2 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4 -; AVX2-NEXT: vpacksswb %xmm4, %xmm2, %xmm2 -; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1 -; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] -; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: %AL %AL %EAX -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512F-LABEL: v8i64: -; AVX512F: # BB#0: -; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 -; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1} -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: %AL %AL %EAX -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: v8i64: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 -; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1} -; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: %AL %AL %EAX -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq - %x0 = icmp sgt <8 x i64> %a, %b - %x1 = icmp sgt <8 x i64> %c, %d - %y = and <8 x i1> %x0, %x1 - %res = bitcast <8 x 
i1> %y to i8 - ret i8 %res -} - -define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d) { -; SSE-LABEL: v8f64: -; SSE: # BB#0: -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: cmpltpd %xmm3, %xmm7 -; SSE-NEXT: cmpltpd %xmm2, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2] -; SSE-NEXT: pslld $31, %xmm6 -; SSE-NEXT: psrad $31, %xmm6 -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; SSE-NEXT: pshufb %xmm2, %xmm6 -; SSE-NEXT: cmpltpd %xmm1, %xmm5 -; SSE-NEXT: cmpltpd %xmm0, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2] -; SSE-NEXT: pslld $31, %xmm4 -; SSE-NEXT: psrad $31, %xmm4 -; SSE-NEXT: pshufb %xmm2, %xmm4 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0] -; SSE-NEXT: psllw $15, %xmm4 -; SSE-NEXT: psraw $15, %xmm4 -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2] -; SSE-NEXT: pslld $31, %xmm9 -; SSE-NEXT: psrad $31, %xmm9 -; SSE-NEXT: pshufb %xmm2, %xmm9 -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2] -; SSE-NEXT: pslld $31, %xmm8 -; SSE-NEXT: psrad $31, %xmm8 -; SSE-NEXT: pshufb %xmm2, %xmm8 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] -; SSE-NEXT: psllw $15, %xmm8 -; SSE-NEXT: psraw $15, %xmm8 -; SSE-NEXT: pand %xmm4, %xmm8 -; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] -; SSE-NEXT: pmovmskb %xmm8, %eax -; SSE-NEXT: # kill: %AL %AL %EAX -; SSE-NEXT: retq -; -; AVX12-LABEL: v8f64: -; AVX12: # BB#0: -; AVX12-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1 -; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX12-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 -; AVX12-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX12-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0 -; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 -; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0 -; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX12-NEXT: vpsllw $15, %xmm0, %xmm0 -; AVX12-NEXT: vpsraw $15, %xmm0, %xmm0 -; AVX12-NEXT: vcmpltpd %ymm5, %ymm7, %ymm1 -; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX12-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 -; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX12-NEXT: vcmpltpd %ymm4, %ymm6, %ymm2 -; AVX12-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX12-NEXT: vpacksswb %xmm4, %xmm2, %xmm2 -; AVX12-NEXT: vpshufb %xmm3, %xmm2, %xmm2 -; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX12-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX12-NEXT: vpsraw $15, %xmm1, %xmm1 -; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] -; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: %AL %AL %EAX -; AVX12-NEXT: vzeroupper -; AVX12-NEXT: retq -; -; AVX512F-LABEL: v8f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1 -; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1} -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: %AL %AL %EAX -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: v8f64: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1 -; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1} -; AVX512BW-NEXT: kmovd %k0, %eax -; 
AVX512BW-NEXT: # kill: %AL %AL %EAX -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq - %x0 = fcmp ogt <8 x double> %a, %b - %x1 = fcmp ogt <8 x double> %c, %d - %y = and <8 x i1> %x0, %x1 - %res = bitcast <8 x i1> %y to i8 - ret i8 %res -} - -define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) { -; SSE-LABEL: v32i16: -; SSE: # BB#0: -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pcmpgtw %xmm5, %xmm1 -; SSE-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; SSE-NEXT: pshufb %xmm5, %xmm1 -; SSE-NEXT: pcmpgtw %xmm4, %xmm0 -; SSE-NEXT: pshufb %xmm5, %xmm0 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE-NEXT: psllw $7, %xmm0 -; SSE-NEXT: movdqa {{.*#+}} xmm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: pxor %xmm1, %xmm1 -; SSE-NEXT: pxor %xmm4, %xmm4 -; SSE-NEXT: pcmpgtb %xmm0, %xmm4 -; SSE-NEXT: pcmpgtw %xmm7, %xmm3 -; SSE-NEXT: pshufb %xmm5, %xmm3 -; SSE-NEXT: pcmpgtw %xmm6, %xmm2 -; SSE-NEXT: pshufb %xmm5, %xmm2 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; SSE-NEXT: psllw $7, %xmm2 -; SSE-NEXT: pand %xmm12, %xmm2 -; SSE-NEXT: pxor %xmm0, %xmm0 -; SSE-NEXT: pcmpgtb %xmm2, %xmm0 -; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pshufb %xmm5, %xmm11 -; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: pshufb %xmm5, %xmm10 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm11[0] -; SSE-NEXT: psllw $7, %xmm10 -; SSE-NEXT: pand %xmm12, %xmm10 -; SSE-NEXT: pxor %xmm2, %xmm2 -; SSE-NEXT: pcmpgtb %xmm10, %xmm2 -; SSE-NEXT: pand %xmm4, %xmm2 -; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: pshufb %xmm5, %xmm9 -; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: pshufb %xmm5, %xmm8 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] -; SSE-NEXT: psllw $7, %xmm8 -; SSE-NEXT: pand %xmm12, %xmm8 -; SSE-NEXT: pcmpgtb %xmm8, %xmm1 -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: pmovmskb %xmm2, %ecx -; SSE-NEXT: pmovmskb %xmm1, %eax -; SSE-NEXT: shll $16, %eax -; SSE-NEXT: orl %ecx, %eax -; SSE-NEXT: retq -; -; AVX1-LABEL: v32i16: -; AVX1: # BB#0: -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 -; AVX1-NEXT: vpcmpgtw %xmm8, %xmm9, %xmm8 -; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm8 -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1 -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2 -; AVX1-NEXT: vpcmpgtw %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpcmpgtw %xmm7, %xmm5, %xmm2 -; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1 -; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3 -; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpcmpgtw %xmm6, %xmm4, %xmm3 -; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0 -; AVX1-NEXT: vpmovmskb %xmm0, %ecx -; AVX1-NEXT: 
vpsllw $7, %xmm1, %xmm0 -; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0 -; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: orl %ecx, %eax -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: v32i16: -; AVX2: # BB#0: -; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 -; AVX2-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm1 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX2-NEXT: vpacksswb %xmm3, %xmm2, %xmm2 -; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 -; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vpmovmskb %ymm0, %eax -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512F-LABEL: v32i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: pushq %rbp -; AVX512F-NEXT: .Lcfi0: -; AVX512F-NEXT: .cfi_def_cfa_offset 16 -; AVX512F-NEXT: .Lcfi1: -; AVX512F-NEXT: .cfi_offset %rbp, -16 -; AVX512F-NEXT: movq %rsp, %rbp -; AVX512F-NEXT: .Lcfi2: -; AVX512F-NEXT: .cfi_def_cfa_register %rbp -; AVX512F-NEXT: andq $-32, %rsp -; AVX512F-NEXT: subq $32, %rsp -; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1 -; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $14, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: kshiftlw $15, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm1 -; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $13, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $12, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $11, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $10, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $9, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $8, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $7, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $6, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $5, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $4, %k0, %k1 -; AVX512F-NEXT: 
kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $3, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $2, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $1, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftrw $15, %k0, %k0 -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0 -; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 -; AVX512F-NEXT: kshiftlw $14, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: kshiftlw $15, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm0 -; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $13, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $12, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $11, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $10, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $9, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $8, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $7, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $6, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $5, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $4, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $3, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $2, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftlw $1, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: kshiftrw $15, %k0, %k0 -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 -; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm1 -; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1 -; 
AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $14, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: kshiftlw $15, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm1 -; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $13, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $12, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $11, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $10, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $9, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $8, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $7, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $6, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $5, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $4, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $3, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $2, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftlw $1, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: kshiftrw $15, %k0, %k0 -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 -; AVX512F-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2 -; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 -; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2 -; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0 -; AVX512F-NEXT: kshiftlw $14, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: kshiftlw $15, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm2 -; AVX512F-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $13, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $12, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $11, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $4, %eax, 
%xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $10, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $9, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $8, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $7, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $6, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $5, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $4, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $3, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $2, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftlw $1, %k0, %k1 -; AVX512F-NEXT: kshiftrw $15, %k1, %k1 -; AVX512F-NEXT: kmovw %k1, %eax -; AVX512F-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: kshiftrw $15, %k0, %k0 -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 -; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 -; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 -; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) -; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0 -; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 -; AVX512F-NEXT: kmovw %k0, (%rsp) -; AVX512F-NEXT: movl (%rsp), %eax -; AVX512F-NEXT: movq %rbp, %rsp -; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: v32i16: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1 -; AVX512BW-NEXT: vpcmpgtw %zmm3, %zmm2, %k0 {%k1} -; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq - %x0 = icmp sgt <32 x i16> %a, %b - %x1 = icmp sgt <32 x i16> %c, %d - %y = and <32 x i1> %x0, %x1 - %res = bitcast <32 x i1> %y to i32 - ret i32 %res -} - -define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) { -; SSE-LABEL: v16i32: -; SSE: # BB#0: -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pcmpgtd %xmm7, %xmm3 -; SSE-NEXT: movdqa {{.*#+}} xmm7 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; SSE-NEXT: pshufb %xmm7, %xmm3 -; SSE-NEXT: pcmpgtd %xmm6, %xmm2 -; SSE-NEXT: pshufb %xmm7, %xmm2 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; SSE-NEXT: psllw $15, %xmm2 -; SSE-NEXT: psraw $15, %xmm2 -; SSE-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; SSE-NEXT: pshufb %xmm3, %xmm2 -; SSE-NEXT: pcmpgtd %xmm5, %xmm1 -; SSE-NEXT: pshufb %xmm7, %xmm1 -; 
SSE-NEXT: pcmpgtd %xmm4, %xmm0 -; SSE-NEXT: pshufb %xmm7, %xmm0 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE-NEXT: psllw $15, %xmm0 -; SSE-NEXT: psraw $15, %xmm0 -; SSE-NEXT: pshufb %xmm3, %xmm0 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; SSE-NEXT: psllw $7, %xmm0 -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pxor %xmm1, %xmm1 -; SSE-NEXT: pxor %xmm4, %xmm4 -; SSE-NEXT: pcmpgtb %xmm0, %xmm4 -; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pshufb %xmm7, %xmm11 -; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: pshufb %xmm7, %xmm9 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0] -; SSE-NEXT: psllw $15, %xmm9 -; SSE-NEXT: psraw $15, %xmm9 -; SSE-NEXT: pshufb %xmm3, %xmm9 -; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: pshufb %xmm7, %xmm10 -; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: pshufb %xmm7, %xmm8 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0] -; SSE-NEXT: psllw $15, %xmm8 -; SSE-NEXT: psraw $15, %xmm8 -; SSE-NEXT: pshufb %xmm3, %xmm8 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] -; SSE-NEXT: psllw $7, %xmm8 -; SSE-NEXT: pand %xmm2, %xmm8 -; SSE-NEXT: pcmpgtb %xmm8, %xmm1 -; SSE-NEXT: pand %xmm4, %xmm1 -; SSE-NEXT: pmovmskb %xmm1, %eax -; SSE-NEXT: # kill: %AX %AX %EAX -; SSE-NEXT: retq -; -; AVX1-LABEL: v16i32: -; AVX1: # BB#0: -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 -; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8 -; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm9 -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0] -; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; AVX1-NEXT: vpand %xmm9, %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 -; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm1 -; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtd %xmm7, %xmm5, %xmm3 -; AVX1-NEXT: vpacksswb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm1 -; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5 -; AVX1-NEXT: vpcmpgtd %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpcmpgtd %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpacksswb %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpshufb %xmm8, %xmm3, %xmm3 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0] -; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm9, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: %AX %AX %EAX -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: v16i32: -; AVX2: # BB#0: -; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, 
%ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 -; AVX2-NEXT: vpcmpgtd %ymm7, %ymm5, %ymm5 -; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm7 -; AVX2-NEXT: vpacksswb %xmm7, %xmm5, %xmm5 -; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5 -; AVX2-NEXT: vpcmpgtd %ymm6, %ymm4, %ymm4 -; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6 -; AVX2-NEXT: vpacksswb %xmm6, %xmm4, %xmm4 -; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm3 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0] -; AVX2-NEXT: vpsllw $7, %xmm3, %xmm3 -; AVX2-NEXT: vpand %xmm1, %xmm3, %xmm1 -; AVX2-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 -; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: %AX %AX %EAX -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512F-LABEL: v16i32: -; AVX512F: # BB#0: -; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1 -; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1} -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: %AX %AX %EAX -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: v16i32: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1 -; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1} -; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: %AX %AX %EAX -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq - %x0 = icmp sgt <16 x i32> %a, %b - %x1 = icmp sgt <16 x i32> %c, %d - %y = and <16 x i1> %x0, %x1 - %res = bitcast <16 x i1> %y to i16 - ret i16 %res -} - -define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d) { -; SSE-LABEL: v16f32: -; SSE: # BB#0: -; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: cmpltps %xmm3, %xmm7 -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; SSE-NEXT: pshufb %xmm3, %xmm7 -; SSE-NEXT: cmpltps %xmm2, %xmm6 -; SSE-NEXT: pshufb %xmm3, %xmm6 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; SSE-NEXT: psllw $15, %xmm6 -; SSE-NEXT: psraw $15, %xmm6 -; SSE-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; SSE-NEXT: pshufb %xmm2, %xmm6 -; SSE-NEXT: cmpltps %xmm1, %xmm5 -; SSE-NEXT: pshufb %xmm3, %xmm5 -; SSE-NEXT: cmpltps %xmm0, %xmm4 -; SSE-NEXT: pshufb %xmm3, %xmm4 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; SSE-NEXT: psllw $15, %xmm4 -; SSE-NEXT: psraw $15, %xmm4 -; SSE-NEXT: pshufb %xmm2, %xmm4 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0] -; SSE-NEXT: psllw $7, %xmm4 -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; SSE-NEXT: pand %xmm1, %xmm4 -; SSE-NEXT: xorps %xmm0, %xmm0 -; SSE-NEXT: pxor %xmm5, %xmm5 -; SSE-NEXT: pcmpgtb %xmm4, %xmm5 -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pshufb %xmm3, %xmm11 -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: pshufb %xmm3, %xmm9 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0] -; SSE-NEXT: psllw $15, %xmm9 -; SSE-NEXT: psraw $15, %xmm9 -; SSE-NEXT: pshufb %xmm2, %xmm9 -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: pshufb %xmm3, 
%xmm10 -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: pshufb %xmm3, %xmm8 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0] -; SSE-NEXT: psllw $15, %xmm8 -; SSE-NEXT: psraw $15, %xmm8 -; SSE-NEXT: pshufb %xmm2, %xmm8 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] -; SSE-NEXT: psllw $7, %xmm8 -; SSE-NEXT: pand %xmm1, %xmm8 -; SSE-NEXT: pcmpgtb %xmm8, %xmm0 -; SSE-NEXT: pand %xmm5, %xmm0 -; SSE-NEXT: pmovmskb %xmm0, %eax -; SSE-NEXT: # kill: %AX %AX %EAX -; SSE-NEXT: retq -; -; AVX12-LABEL: v16f32: -; AVX12: # BB#0: -; AVX12-NEXT: vcmpltps %ymm1, %ymm3, %ymm1 -; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX12-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 -; AVX12-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX12-NEXT: vcmpltps %ymm0, %ymm2, %ymm0 -; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 -; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0 -; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX12-NEXT: vpsllw $7, %xmm0, %xmm0 -; AVX12-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX12-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX12-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 -; AVX12-NEXT: vcmpltps %ymm5, %ymm7, %ymm5 -; AVX12-NEXT: vextractf128 $1, %ymm5, %xmm7 -; AVX12-NEXT: vpacksswb %xmm7, %xmm5, %xmm5 -; AVX12-NEXT: vpshufb %xmm3, %xmm5, %xmm5 -; AVX12-NEXT: vcmpltps %ymm4, %ymm6, %ymm4 -; AVX12-NEXT: vextractf128 $1, %ymm4, %xmm6 -; AVX12-NEXT: vpacksswb %xmm6, %xmm4, %xmm4 -; AVX12-NEXT: vpshufb %xmm3, %xmm4, %xmm3 -; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0] -; AVX12-NEXT: vpsllw $7, %xmm3, %xmm3 -; AVX12-NEXT: vpand %xmm1, %xmm3, %xmm1 -; AVX12-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 -; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: %AX %AX %EAX -; AVX12-NEXT: vzeroupper -; AVX12-NEXT: retq -; -; AVX512F-LABEL: v16f32: -; AVX512F: # BB#0: -; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1 -; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1} -; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: %AX %AX %EAX -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: v16f32: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1 -; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1} -; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: %AX %AX %EAX -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq - %x0 = fcmp ogt <16 x float> %a, %b - %x1 = fcmp ogt <16 x float> %c, %d - %y = and <16 x i1> %x0, %x1 - %res = bitcast <16 x i1> %y to i16 - ret i16 %res -} - -define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { -; SSE-LABEL: v64i8: -; SSE: # BB#0: -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: pcmpgtb %xmm6, %xmm2 -; SSE-NEXT: pcmpgtb %xmm7, %xmm3 -; SSE-NEXT: pcmpgtb %xmm4, %xmm0 -; SSE-NEXT: pcmpgtb %xmm5, %xmm1 -; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: pand %xmm2, %xmm8 -; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: pand %xmm3, %xmm9 -; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: pand %xmm0, %xmm10 -; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pand %xmm1, %xmm11 -; SSE-NEXT: pextrb $15, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: 
pextrb $14, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $13, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $12, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $11, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $10, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $9, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $8, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $7, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $6, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $5, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $4, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $3, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $2, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $1, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $0, %xmm11, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $15, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $14, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $13, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $12, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $11, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $10, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $9, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $8, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $7, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $6, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $5, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $4, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $3, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $2, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $1, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $0, %xmm10, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $15, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $14, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $13, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb 
$12, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $11, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $10, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $9, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $8, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $7, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $6, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $5, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $4, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $3, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $2, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $1, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $0, %xmm9, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $15, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $14, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $13, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $12, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $11, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $10, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $9, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $8, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $7, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $6, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $5, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $4, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $3, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $2, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $1, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: pextrb $0, %xmm8, %eax -; SSE-NEXT: andb $1, %al -; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: shll $16, %eax -; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %ecx -; SSE-NEXT: orl %eax, %ecx -; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %edx -; SSE-NEXT: shll $16, %edx -; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: orl %edx, %eax -; SSE-NEXT: shlq $32, %rax -; SSE-NEXT: orq %rcx, %rax -; SSE-NEXT: retq -; -; AVX1-LABEL: v64i8: -; AVX1: # BB#0: -; AVX1-NEXT: pushq %rbp -; AVX1-NEXT: .Lcfi0: -; AVX1-NEXT: .cfi_def_cfa_offset 16 -; AVX1-NEXT: .Lcfi1: -; AVX1-NEXT: .cfi_offset %rbp, -16 -; AVX1-NEXT: movq %rsp, %rbp -; 
AVX1-NEXT: .Lcfi2: -; AVX1-NEXT: .cfi_def_cfa_register %rbp -; AVX1-NEXT: andq $-32, %rsp -; AVX1-NEXT: subq $64, %rsp -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 -; AVX1-NEXT: vpcmpgtb %xmm8, %xmm9, %xmm8 -; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm1, %ymm8 -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 -; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm0 -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2 -; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 -; AVX1-NEXT: vpcmpgtb %xmm7, %xmm5, %xmm2 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 -; AVX1-NEXT: vandps %ymm0, %ymm8, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3 -; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpcmpgtb %xmm6, %xmm4, %xmm3 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 -; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpextrb $15, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $14, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $13, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $12, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $11, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $10, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $9, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $8, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $7, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $6, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $5, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $4, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $3, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $2, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $1, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $0, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $15, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $14, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $13, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $12, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $11, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $10, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $9, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $8, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $7, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) 
-; AVX1-NEXT: vpextrb $6, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $5, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $4, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $3, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $2, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $1, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vpextrb $0, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, (%rsp) -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpextrb $15, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $14, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $13, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $12, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $11, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $10, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $9, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $8, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $7, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $6, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $5, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $4, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $3, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $2, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $1, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $0, %xmm1, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $15, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $14, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $13, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $12, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $11, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $10, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $9, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $8, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $7, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $6, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $5, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; 
AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $4, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $3, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $2, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $1, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $0, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX1-NEXT: movl (%rsp), %ecx -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: shlq $32, %rax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: movq %rbp, %rsp -; AVX1-NEXT: popq %rbp -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: v64i8: -; AVX2: # BB#0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .Lcfi0: -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: .Lcfi1: -; AVX2-NEXT: .cfi_offset %rbp, -16 -; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: .Lcfi2: -; AVX2-NEXT: .cfi_def_cfa_register %rbp -; AVX2-NEXT: andq $-32, %rsp -; AVX2-NEXT: subq $64, %rsp -; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm2 -; AVX2-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm0 -; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm1 -; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm1 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpextrb $15, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $14, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $13, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $12, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $11, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $10, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $9, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $8, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $7, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $6, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $5, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $4, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $3, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $2, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $1, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $0, %xmm2, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $15, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $14, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $13, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $12, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $11, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $10, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) 
-; AVX2-NEXT: vpextrb $9, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $8, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $7, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $6, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $5, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $4, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $3, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $2, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $1, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $0, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpextrb $15, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $14, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $13, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $12, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $11, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $10, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $9, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $8, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $7, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $6, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $5, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $4, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $3, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $2, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $1, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $0, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $15, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $14, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $13, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $12, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $11, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $10, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $9, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $8, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, 
{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $7, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $6, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $5, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $4, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $3, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $2, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $1, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $0, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp) -; AVX2-NEXT: movl (%rsp), %ecx -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: shlq $32, %rax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: movq %rbp, %rsp -; AVX2-NEXT: popq %rbp -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512F-LABEL: v64i8: -; AVX512F: # BB#0: -; AVX512F-NEXT: pushq %rbp -; AVX512F-NEXT: .Lcfi3: -; AVX512F-NEXT: .cfi_def_cfa_offset 16 -; AVX512F-NEXT: .Lcfi4: -; AVX512F-NEXT: .cfi_offset %rbp, -16 -; AVX512F-NEXT: movq %rsp, %rbp -; AVX512F-NEXT: .Lcfi5: -; AVX512F-NEXT: .cfi_def_cfa_register %rbp -; AVX512F-NEXT: andq $-32, %rsp -; AVX512F-NEXT: subq $64, %rsp -; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1 -; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0 -; AVX512F-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm2 -; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1 -; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm2 -; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2 -; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2 -; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0 -; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) -; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0 -; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 -; AVX512F-NEXT: kmovw %k0, (%rsp) -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm0 -; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0 -; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 -; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) -; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm0 -; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0 -; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 -; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) -; AVX512F-NEXT: movl (%rsp), %ecx -; AVX512F-NEXT: movl {{[0-9]+}}(%rsp), %eax -; AVX512F-NEXT: shlq $32, %rax -; AVX512F-NEXT: orq %rcx, %rax -; AVX512F-NEXT: movq %rbp, %rsp -; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: v64i8: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1 -; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm2, %k0 {%k1} -; AVX512BW-NEXT: kmovq %k0, %rax -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq - %x0 = icmp sgt <64 x i8> %a, %b - %x1 = icmp sgt <64 x i8> %c, %d - %y = and <64 x i1> %x0, %x1 - %res = bitcast <64 x i1> %y to i64 - ret i64 %res -} +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2 +; RUN: llc < %s 
-mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW + +define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) { +; SSE-LABEL: v8i64: +; SSE: # BB#0: +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pcmpgtq %xmm7, %xmm3 +; SSE-NEXT: pcmpgtq %xmm6, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2] +; SSE-NEXT: pslld $31, %xmm2 +; SSE-NEXT: psrad $31, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; SSE-NEXT: pshufb %xmm3, %xmm2 +; SSE-NEXT: pcmpgtq %xmm5, %xmm1 +; SSE-NEXT: pcmpgtq %xmm4, %xmm0 +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] +; SSE-NEXT: pslld $31, %xmm0 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pshufb %xmm3, %xmm0 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE-NEXT: psllw $15, %xmm0 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2] +; SSE-NEXT: pslld $31, %xmm9 +; SSE-NEXT: psrad $31, %xmm9 +; SSE-NEXT: pshufb %xmm3, %xmm9 +; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2] +; SSE-NEXT: pslld $31, %xmm8 +; SSE-NEXT: psrad $31, %xmm8 +; SSE-NEXT: pshufb %xmm3, %xmm8 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] +; SSE-NEXT: psllw $15, %xmm8 +; SSE-NEXT: psraw $15, %xmm8 +; SSE-NEXT: pand %xmm0, %xmm8 +; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; SSE-NEXT: pmovmskb %xmm8, %eax +; SSE-NEXT: # kill: %AL %AL %EAX +; SSE-NEXT: retq +; +; AVX1-LABEL: v8i64: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 +; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8 +; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm9 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0] +; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2 +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm2 +; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3 +; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm3 +; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpshufb %xmm8, %xmm2, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpmovmskb %xmm0, %eax +; 
AVX1-NEXT: # kill: %AL %AL %EAX +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: v8i64: +; AVX2: # BB#0: +; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 +; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0 +; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpgtq %ymm7, %ymm5, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtq %ymm6, %ymm4, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4 +; AVX2-NEXT: vpacksswb %xmm4, %xmm2, %xmm2 +; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpmovmskb %xmm0, %eax +; AVX2-NEXT: # kill: %AL %AL %EAX +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512F-LABEL: v8i64: +; AVX512F: # BB#0: +; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 +; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1} +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: # kill: %AL %AL %EAX +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: v8i64: +; AVX512BW: # BB#0: +; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 +; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1} +; AVX512BW-NEXT: kmovd %k0, %eax +; AVX512BW-NEXT: # kill: %AL %AL %EAX +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq + %x0 = icmp sgt <8 x i64> %a, %b + %x1 = icmp sgt <8 x i64> %c, %d + %y = and <8 x i1> %x0, %x1 + %res = bitcast <8 x i1> %y to i8 + ret i8 %res +} + +define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d) { +; SSE-LABEL: v8f64: +; SSE: # BB#0: +; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: cmpltpd %xmm3, %xmm7 +; SSE-NEXT: cmpltpd %xmm2, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2] +; SSE-NEXT: pslld $31, %xmm6 +; SSE-NEXT: psrad $31, %xmm6 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; SSE-NEXT: pshufb %xmm2, %xmm6 +; SSE-NEXT: cmpltpd %xmm1, %xmm5 +; SSE-NEXT: cmpltpd %xmm0, %xmm4 +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2] +; SSE-NEXT: pslld $31, %xmm4 +; SSE-NEXT: psrad $31, %xmm4 +; SSE-NEXT: pshufb %xmm2, %xmm4 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0] +; SSE-NEXT: psllw $15, %xmm4 +; SSE-NEXT: psraw $15, %xmm4 +; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2] +; SSE-NEXT: pslld $31, %xmm9 +; SSE-NEXT: psrad $31, %xmm9 +; SSE-NEXT: pshufb %xmm2, %xmm9 +; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2] +; SSE-NEXT: pslld $31, %xmm8 +; SSE-NEXT: psrad $31, %xmm8 +; SSE-NEXT: pshufb %xmm2, %xmm8 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] +; 
SSE-NEXT: psllw $15, %xmm8 +; SSE-NEXT: psraw $15, %xmm8 +; SSE-NEXT: pand %xmm4, %xmm8 +; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; SSE-NEXT: pmovmskb %xmm8, %eax +; SSE-NEXT: # kill: %AL %AL %EAX +; SSE-NEXT: retq +; +; AVX12-LABEL: v8f64: +; AVX12: # BB#0: +; AVX12-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1 +; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3 +; AVX12-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 +; AVX12-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX12-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0 +; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 +; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0 +; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX12-NEXT: vpsllw $15, %xmm0, %xmm0 +; AVX12-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX12-NEXT: vcmpltpd %ymm5, %ymm7, %ymm1 +; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX12-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 +; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX12-NEXT: vcmpltpd %ymm4, %ymm6, %ymm2 +; AVX12-NEXT: vextractf128 $1, %ymm2, %xmm4 +; AVX12-NEXT: vpacksswb %xmm4, %xmm2, %xmm2 +; AVX12-NEXT: vpshufb %xmm3, %xmm2, %xmm2 +; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX12-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX12-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX12-NEXT: vpmovmskb %xmm0, %eax +; AVX12-NEXT: # kill: %AL %AL %EAX +; AVX12-NEXT: vzeroupper +; AVX12-NEXT: retq +; +; AVX512F-LABEL: v8f64: +; AVX512F: # BB#0: +; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1 +; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1} +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: # kill: %AL %AL %EAX +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: v8f64: +; AVX512BW: # BB#0: +; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1 +; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1} +; AVX512BW-NEXT: kmovd %k0, %eax +; AVX512BW-NEXT: # kill: %AL %AL %EAX +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq + %x0 = fcmp ogt <8 x double> %a, %b + %x1 = fcmp ogt <8 x double> %c, %d + %y = and <8 x i1> %x0, %x1 + %res = bitcast <8 x i1> %y to i8 + ret i8 %res +} + +define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) { +; SSE-LABEL: v32i16: +; SSE: # BB#0: +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pcmpgtw %xmm5, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; SSE-NEXT: pshufb %xmm5, %xmm1 +; SSE-NEXT: pcmpgtw %xmm4, %xmm0 +; SSE-NEXT: pshufb %xmm5, %xmm0 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: psllw $7, %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; SSE-NEXT: pand %xmm12, %xmm0 +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: pxor %xmm4, %xmm4 +; SSE-NEXT: pcmpgtb %xmm0, %xmm4 +; SSE-NEXT: pcmpgtw %xmm7, %xmm3 +; SSE-NEXT: pshufb %xmm5, %xmm3 +; SSE-NEXT: pcmpgtw %xmm6, %xmm2 +; SSE-NEXT: pshufb %xmm5, %xmm2 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE-NEXT: psllw $7, %xmm2 +; SSE-NEXT: pand %xmm12, %xmm2 +; SSE-NEXT: pxor %xmm0, %xmm0 +; SSE-NEXT: pcmpgtb %xmm2, %xmm0 +; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pshufb %xmm5, %xmm11 +; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), 
%xmm10 +; SSE-NEXT: pshufb %xmm5, %xmm10 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm11[0] +; SSE-NEXT: psllw $7, %xmm10 +; SSE-NEXT: pand %xmm12, %xmm10 +; SSE-NEXT: pxor %xmm2, %xmm2 +; SSE-NEXT: pcmpgtb %xmm10, %xmm2 +; SSE-NEXT: pand %xmm4, %xmm2 +; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: pshufb %xmm5, %xmm9 +; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: pshufb %xmm5, %xmm8 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] +; SSE-NEXT: psllw $7, %xmm8 +; SSE-NEXT: pand %xmm12, %xmm8 +; SSE-NEXT: pcmpgtb %xmm8, %xmm1 +; SSE-NEXT: pand %xmm0, %xmm1 +; SSE-NEXT: pmovmskb %xmm2, %ecx +; SSE-NEXT: pmovmskb %xmm1, %eax +; SSE-NEXT: shll $16, %eax +; SSE-NEXT: orl %ecx, %eax +; SSE-NEXT: retq +; +; AVX1-LABEL: v32i16: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 +; AVX1-NEXT: vpcmpgtw %xmm8, %xmm9, %xmm8 +; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm8 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2 +; AVX1-NEXT: vpcmpgtw %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpcmpgtw %xmm7, %xmm5, %xmm2 +; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3 +; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpcmpgtw %xmm6, %xmm4, %xmm3 +; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpmovmskb %xmm0, %ecx +; AVX1-NEXT: vpmovmskb %xmm1, %eax +; AVX1-NEXT: shll $16, %eax +; AVX1-NEXT: orl %ecx, %eax +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: v32i16: +; AVX2: # BB#0: +; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 +; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpacksswb %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpmovmskb %ymm0, %eax +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512F-LABEL: v32i16: +; AVX512F: # BB#0: +; AVX512F-NEXT: pushq %rbp +; AVX512F-NEXT: .Lcfi0: +; AVX512F-NEXT: .cfi_def_cfa_offset 16 +; AVX512F-NEXT: .Lcfi1: +; AVX512F-NEXT: .cfi_offset %rbp, -16 +; AVX512F-NEXT: movq %rsp, %rbp +; AVX512F-NEXT: .Lcfi2: +; AVX512F-NEXT: .cfi_def_cfa_register %rbp +; AVX512F-NEXT: andq $-32, %rsp +; AVX512F-NEXT: subq $32, %rsp +; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1 +; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $14, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: kshiftlw $15, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %ecx +; AVX512F-NEXT: vmovd %ecx, %xmm1 +; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; 
AVX512F-NEXT: kshiftlw $13, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $12, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $11, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $10, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $9, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $8, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $7, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $6, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $5, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $4, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $3, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $2, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $1, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftrw $15, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0 +; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0 +; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 +; AVX512F-NEXT: kshiftlw $14, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: kshiftlw $15, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %ecx +; AVX512F-NEXT: vmovd %ecx, %xmm0 +; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $13, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $11, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $10, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $9, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $8, %k0, %k1 +; AVX512F-NEXT: 
kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $7, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $6, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $5, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $4, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $3, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $2, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $1, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftrw $15, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm1 +; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $14, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: kshiftlw $15, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %ecx +; AVX512F-NEXT: vmovd %ecx, %xmm1 +; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $13, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $12, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $11, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $10, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $9, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $8, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $7, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $6, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $5, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $4, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $3, %k0, %k1 +; AVX512F-NEXT: 
kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $2, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftlw $1, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: kshiftrw $15, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX512F-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2 +; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 +; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2 +; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0 +; AVX512F-NEXT: kshiftlw $14, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: kshiftlw $15, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %ecx +; AVX512F-NEXT: vmovd %ecx, %xmm2 +; AVX512F-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $13, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $12, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $11, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $10, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $9, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $8, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $7, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $6, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $5, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $4, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $3, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $2, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftlw $1, %k0, %k1 +; AVX512F-NEXT: kshiftrw $15, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, %eax +; AVX512F-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: kshiftrw $15, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 +; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) 
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0 +; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0 +; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 +; AVX512F-NEXT: kmovw %k0, (%rsp) +; AVX512F-NEXT: movl (%rsp), %eax +; AVX512F-NEXT: movq %rbp, %rsp +; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: v32i16: +; AVX512BW: # BB#0: +; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1 +; AVX512BW-NEXT: vpcmpgtw %zmm3, %zmm2, %k0 {%k1} +; AVX512BW-NEXT: kmovd %k0, %eax +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq + %x0 = icmp sgt <32 x i16> %a, %b + %x1 = icmp sgt <32 x i16> %c, %d + %y = and <32 x i1> %x0, %x1 + %res = bitcast <32 x i1> %y to i32 + ret i32 %res +} + +define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) { +; SSE-LABEL: v16i32: +; SSE: # BB#0: +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pcmpgtd %xmm7, %xmm3 +; SSE-NEXT: movdqa {{.*#+}} xmm7 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; SSE-NEXT: pshufb %xmm7, %xmm3 +; SSE-NEXT: pcmpgtd %xmm6, %xmm2 +; SSE-NEXT: pshufb %xmm7, %xmm2 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE-NEXT: psllw $15, %xmm2 +; SSE-NEXT: psraw $15, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; SSE-NEXT: pshufb %xmm3, %xmm2 +; SSE-NEXT: pcmpgtd %xmm5, %xmm1 +; SSE-NEXT: pshufb %xmm7, %xmm1 +; SSE-NEXT: pcmpgtd %xmm4, %xmm0 +; SSE-NEXT: pshufb %xmm7, %xmm0 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: psllw $15, %xmm0 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pshufb %xmm3, %xmm0 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE-NEXT: psllw $7, %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: pxor %xmm4, %xmm4 +; SSE-NEXT: pcmpgtb %xmm0, %xmm4 +; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pshufb %xmm7, %xmm11 +; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: pshufb %xmm7, %xmm9 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0] +; SSE-NEXT: psllw $15, %xmm9 +; SSE-NEXT: psraw $15, %xmm9 +; SSE-NEXT: pshufb %xmm3, %xmm9 +; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: pshufb %xmm7, %xmm10 +; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: pshufb %xmm7, %xmm8 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0] +; SSE-NEXT: psllw $15, %xmm8 +; SSE-NEXT: psraw $15, %xmm8 +; SSE-NEXT: pshufb %xmm3, %xmm8 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] +; SSE-NEXT: psllw $7, %xmm8 +; SSE-NEXT: pand %xmm2, %xmm8 +; SSE-NEXT: pcmpgtb %xmm8, %xmm1 +; SSE-NEXT: pand %xmm4, %xmm1 +; SSE-NEXT: pmovmskb %xmm1, %eax +; SSE-NEXT: # kill: %AX %AX %EAX +; SSE-NEXT: retq +; +; AVX1-LABEL: v16i32: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 +; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8 +; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm9 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0 +; 
AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0] +; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; AVX1-NEXT: vpand %xmm9, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm1 +; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtd %xmm7, %xmm5, %xmm3 +; AVX1-NEXT: vpacksswb %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5 +; AVX1-NEXT: vpcmpgtd %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpcmpgtd %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vpacksswb %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpshufb %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX1-NEXT: vpand %xmm9, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmovmskb %xmm0, %eax +; AVX1-NEXT: # kill: %AX %AX %EAX +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: v16i32: +; AVX2: # BB#0: +; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 +; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpcmpgtd %ymm7, %ymm5, %ymm5 +; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm7 +; AVX2-NEXT: vpacksswb %xmm7, %xmm5, %xmm5 +; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5 +; AVX2-NEXT: vpcmpgtd %ymm6, %ymm4, %ymm4 +; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6 +; AVX2-NEXT: vpacksswb %xmm6, %xmm4, %xmm4 +; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm3 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0] +; AVX2-NEXT: vpsllw $7, %xmm3, %xmm3 +; AVX2-NEXT: vpand %xmm1, %xmm3, %xmm1 +; AVX2-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpmovmskb %xmm0, %eax +; AVX2-NEXT: # kill: %AX %AX %EAX +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512F-LABEL: v16i32: +; AVX512F: # BB#0: +; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1 +; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1} +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: # kill: %AX %AX %EAX +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: v16i32: +; AVX512BW: # BB#0: +; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1 +; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1} +; AVX512BW-NEXT: kmovd %k0, %eax +; AVX512BW-NEXT: # kill: %AX %AX %EAX +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq + %x0 = icmp sgt <16 x i32> %a, %b + %x1 = icmp sgt <16 x i32> %c, %d + %y = and <16 x i1> %x0, %x1 + %res = bitcast <16 x i1> %y to i16 + ret i16 %res +} + +define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d) { +; SSE-LABEL: v16f32: +; SSE: # BB#0: +; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10 +; 
SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: cmpltps %xmm3, %xmm7 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; SSE-NEXT: pshufb %xmm3, %xmm7 +; SSE-NEXT: cmpltps %xmm2, %xmm6 +; SSE-NEXT: pshufb %xmm3, %xmm6 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0] +; SSE-NEXT: psllw $15, %xmm6 +; SSE-NEXT: psraw $15, %xmm6 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; SSE-NEXT: pshufb %xmm2, %xmm6 +; SSE-NEXT: cmpltps %xmm1, %xmm5 +; SSE-NEXT: pshufb %xmm3, %xmm5 +; SSE-NEXT: cmpltps %xmm0, %xmm4 +; SSE-NEXT: pshufb %xmm3, %xmm4 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0] +; SSE-NEXT: psllw $15, %xmm4 +; SSE-NEXT: psraw $15, %xmm4 +; SSE-NEXT: pshufb %xmm2, %xmm4 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0] +; SSE-NEXT: psllw $7, %xmm4 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; SSE-NEXT: pand %xmm1, %xmm4 +; SSE-NEXT: xorps %xmm0, %xmm0 +; SSE-NEXT: pxor %xmm5, %xmm5 +; SSE-NEXT: pcmpgtb %xmm4, %xmm5 +; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pshufb %xmm3, %xmm11 +; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: pshufb %xmm3, %xmm9 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0] +; SSE-NEXT: psllw $15, %xmm9 +; SSE-NEXT: psraw $15, %xmm9 +; SSE-NEXT: pshufb %xmm2, %xmm9 +; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: pshufb %xmm3, %xmm10 +; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: pshufb %xmm3, %xmm8 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0] +; SSE-NEXT: psllw $15, %xmm8 +; SSE-NEXT: psraw $15, %xmm8 +; SSE-NEXT: pshufb %xmm2, %xmm8 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0] +; SSE-NEXT: psllw $7, %xmm8 +; SSE-NEXT: pand %xmm1, %xmm8 +; SSE-NEXT: pcmpgtb %xmm8, %xmm0 +; SSE-NEXT: pand %xmm5, %xmm0 +; SSE-NEXT: pmovmskb %xmm0, %eax +; SSE-NEXT: # kill: %AX %AX %EAX +; SSE-NEXT: retq +; +; AVX12-LABEL: v16f32: +; AVX12: # BB#0: +; AVX12-NEXT: vcmpltps %ymm1, %ymm3, %ymm1 +; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3 +; AVX12-NEXT: vpacksswb %xmm3, %xmm1, %xmm1 +; AVX12-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX12-NEXT: vcmpltps %ymm0, %ymm2, %ymm0 +; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 +; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0 +; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX12-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX12-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX12-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 +; AVX12-NEXT: vcmpltps %ymm5, %ymm7, %ymm5 +; AVX12-NEXT: vextractf128 $1, %ymm5, %xmm7 +; AVX12-NEXT: vpacksswb %xmm7, %xmm5, %xmm5 +; AVX12-NEXT: vpshufb %xmm3, %xmm5, %xmm5 +; AVX12-NEXT: vcmpltps %ymm4, %ymm6, %ymm4 +; AVX12-NEXT: vextractf128 $1, %ymm4, %xmm6 +; AVX12-NEXT: vpacksswb %xmm6, %xmm4, %xmm4 +; AVX12-NEXT: vpshufb %xmm3, %xmm4, %xmm3 +; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0] +; AVX12-NEXT: vpsllw $7, %xmm3, %xmm3 +; AVX12-NEXT: vpand %xmm1, %xmm3, %xmm1 +; AVX12-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 +; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpmovmskb %xmm0, %eax +; AVX12-NEXT: # kill: %AX %AX %EAX +; AVX12-NEXT: vzeroupper +; AVX12-NEXT: retq +; +; AVX512F-LABEL: v16f32: +; AVX512F: # BB#0: +; 
AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1 +; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1} +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: # kill: %AX %AX %EAX +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: v16f32: +; AVX512BW: # BB#0: +; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1 +; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1} +; AVX512BW-NEXT: kmovd %k0, %eax +; AVX512BW-NEXT: # kill: %AX %AX %EAX +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq + %x0 = fcmp ogt <16 x float> %a, %b + %x1 = fcmp ogt <16 x float> %c, %d + %y = and <16 x i1> %x0, %x1 + %res = bitcast <16 x i1> %y to i16 + ret i16 %res +} + +define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { +; SSE-LABEL: v64i8: +; SSE: # BB#0: +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: pcmpgtb %xmm6, %xmm2 +; SSE-NEXT: pcmpgtb %xmm7, %xmm3 +; SSE-NEXT: pcmpgtb %xmm4, %xmm0 +; SSE-NEXT: pcmpgtb %xmm5, %xmm1 +; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm8 +; SSE-NEXT: pand %xmm2, %xmm8 +; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm9 +; SSE-NEXT: pand %xmm3, %xmm9 +; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm10 +; SSE-NEXT: pand %xmm0, %xmm10 +; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: pand %xmm1, %xmm11 +; SSE-NEXT: pextrb $15, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $14, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $13, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $12, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $11, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $10, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $9, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $8, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $7, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $6, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $5, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $4, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $3, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $2, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $1, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $0, %xmm11, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $15, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $14, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $13, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $12, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $11, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb 
%al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $10, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $9, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $8, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $7, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $6, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $5, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $4, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $3, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $2, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $1, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $0, %xmm10, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $15, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $14, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $13, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $12, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $11, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $10, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $9, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $8, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $7, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $6, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $5, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $4, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $3, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $2, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $1, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $0, %xmm9, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $15, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $14, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $13, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $12, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $11, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $10, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $9, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; 
SSE-NEXT: pextrb $8, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $7, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $6, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $5, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $4, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $3, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $2, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $1, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: pextrb $0, %xmm8, %eax +; SSE-NEXT: andb $1, %al +; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: shll $16, %eax +; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %ecx +; SSE-NEXT: orl %eax, %ecx +; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %edx +; SSE-NEXT: shll $16, %edx +; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: orl %edx, %eax +; SSE-NEXT: shlq $32, %rax +; SSE-NEXT: orq %rcx, %rax +; SSE-NEXT: retq +; +; AVX1-LABEL: v64i8: +; AVX1: # BB#0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Lcfi0: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; AVX1-NEXT: .Lcfi1: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: .Lcfi2: +; AVX1-NEXT: .cfi_def_cfa_register %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $64, %rsp +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9 +; AVX1-NEXT: vpcmpgtb %xmm8, %xmm9, %xmm8 +; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm1, %ymm8 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm0 +; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2 +; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpcmpgtb %xmm7, %xmm5, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vandps %ymm0, %ymm8, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3 +; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpcmpgtb %xmm6, %xmm4, %xmm3 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 +; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpextrb $15, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $14, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $13, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $12, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $11, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $10, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $9, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $8, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $7, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $6, %xmm2, %eax +; 
AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $5, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $4, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $3, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $2, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $1, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $0, %xmm2, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $15, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $14, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $13, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $12, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $11, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $10, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $9, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $8, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $7, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $6, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $5, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $4, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $3, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $2, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $1, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vpextrb $0, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, (%rsp) +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrb $15, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $14, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $13, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $12, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $11, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $10, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $9, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $8, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $7, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $6, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $5, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $4, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $3, %xmm1, %eax +; AVX1-NEXT: andb 
$1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $2, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $1, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $0, %xmm1, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $15, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $14, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $13, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $12, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $11, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $10, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $9, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $8, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $7, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $6, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $5, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $4, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $3, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $2, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $1, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrb $0, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp) +; AVX1-NEXT: movl (%rsp), %ecx +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: shlq $32, %rax +; AVX1-NEXT: orq %rcx, %rax +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: v64i8: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi0: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .Lcfi1: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .Lcfi2: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm2 +; AVX2-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm0 +; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm1 +; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrb $15, %xmm2, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $14, %xmm2, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $13, %xmm2, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $12, %xmm2, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $11, %xmm2, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $10, %xmm2, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $9, %xmm2, %eax +; 
AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: movl (%rsp), %ecx
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: shlq $32, %rax
+; AVX2-NEXT: orq %rcx, %rax
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: .Lcfi3:
+; AVX512F-NEXT: .cfi_def_cfa_offset 16
+; AVX512F-NEXT: .Lcfi4:
+; AVX512F-NEXT: .cfi_offset %rbp, -16
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: .Lcfi5:
+; AVX512F-NEXT: .cfi_def_cfa_register %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
+; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm2
+; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm2
+; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, (%rsp)
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: movl (%rsp), %ecx
+; AVX512F-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX512F-NEXT: shlq $32, %rax
+; AVX512F-NEXT: orq %rcx, %rax
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm2, %k0 {%k1}
+; AVX512BW-NEXT: kmovq %k0, %rax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x0 = icmp sgt <64 x i8> %a, %b
+ %x1 = icmp sgt <64 x i8> %c, %d
+ %y = and <64 x i1> %x0, %x1
+ %res = bitcast <64 x i1> %y to i64
+ ret i64 %res
+}
Index: llvm/trunk/test/CodeGen/X86/vselect-packss.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-packss.ll
+++ llvm/trunk/test/CodeGen/X86/vselect-packss.ll
@@ -10,15 +10,24 @@
;
define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2, <16 x i8> %a3) {
-; SSE-LABEL: vselect_packss_v16i16:
-; SSE: # BB#0:
-; SSE-NEXT: pcmpeqw %xmm3, %xmm1
-; SSE-NEXT: pcmpeqw %xmm2, %xmm0
-; SSE-NEXT: packsswb %xmm1, %xmm0
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: vselect_packss_v16i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpeqw %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE2-NEXT: packsswb %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: pandn %xmm5, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: vselect_packss_v16i16:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpeqw %xmm3, %xmm1
+; SSE42-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE42-NEXT: packsswb %xmm1, %xmm0
+; SSE42-NEXT: pblendvb %xmm0, %xmm4, %xmm5
+; SSE42-NEXT: movdqa %xmm5, %xmm0
+; SSE42-NEXT: retq
;
; AVX1-LABEL: vselect_packss_v16i16:
; AVX1: # BB#0:
@@ -27,9 +36,7 @@
; AVX1-NEXT: vpcmpeqw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm1
-; AVX1-NEXT: vpandn %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -38,9 +45,7 @@
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm1
-; AVX2-NEXT: vpandn %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -394,15 +399,24 @@
;
define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2, <16 x i8> %a3) {
-; SSE-LABEL: vselect_packss:
-; SSE: # BB#0:
-; SSE-NEXT: pcmpeqw %xmm3, %xmm1
-; SSE-NEXT: pcmpeqw %xmm2, %xmm0
-; SSE-NEXT: packsswb %xmm1, %xmm0
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: vselect_packss:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpeqw %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE2-NEXT: packsswb %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: pandn %xmm5, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: vselect_packss:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpeqw %xmm3, %xmm1
+; SSE42-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE42-NEXT: packsswb %xmm1, %xmm0
+; SSE42-NEXT: pblendvb %xmm0, %xmm4, %xmm5
+; SSE42-NEXT: movdqa %xmm5, %xmm0
+; SSE42-NEXT: retq
;
; AVX1-LABEL: vselect_packss:
; AVX1: # BB#0:
@@ -411,9 +425,7 @@
; AVX1-NEXT: vpcmpeqw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm1
-; AVX1-NEXT: vpandn %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -422,9 +434,7 @@
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm1
-; AVX2-NEXT: vpandn %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -433,9 +443,7 @@
; AVX512-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm1
-; AVX512-NEXT: vpandn %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp eq <16 x i16> %a0, %a1