Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -28934,12 +28934,111 @@
                      EltNo);
 }
 
+// Try to match patterns such as
+// (i16 bitcast (v16i1 setcc v16i8 v1, v2, gt))
+// ->
+// (movmsk (v16i8 setcc v16i8 v1, v2, gt))
+// before the setcc result is scalarized on subtargets that don't have legal
+// vxi1 types.
+static SDValue combineBitcastOfSetCC(SelectionDAG &DAG, SDLoc DL, EVT VT,
+                                     SDValue SetCC,
+                                     const X86Subtarget &Subtarget) {
+  if (!VT.isScalarInteger() || SetCC.getValueType().getScalarType() != MVT::i1)
+    return SDValue();
+
+  // With AVX512 vxi1 types are legal and we prefer using k-regs.
+  // MOVMSK is supported in SSE2 or later.
+  if (Subtarget.hasAVX512() || !Subtarget.hasSSE2())
+    return SDValue();
+
+  if (!SetCC->getOperand(0).getValueType().isSimple() ||
+      (SetCC->getOperand(0).getValueType().getSizeInBits() == 256 &&
+       !Subtarget.hasAVX2()))
+    return SDValue();
+
+  // Type of the operands the setcc compares.
+  MVT InVT = SetCC->getOperand(0).getSimpleValueType();
+
+  MVT WideMaskVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
+  switch (InVT.SimpleTy) {
+  default:
+    return SDValue();
+  case MVT::v16i8:
+  case MVT::v4f32:
+  case MVT::v2f64:
+  case MVT::v32i8:
+  case MVT::v8f32:
+  case MVT::v4f64:
+    WideMaskVT = InVT;
+    break;
+  // TODO: We could merge the following cases if there were an
+  // EVT::changeVectorElementTypeToFloatingPoint().
+  case MVT::v4i32:
+    WideMaskVT = MVT::v4f32;
+    break;
+  case MVT::v2i64:
+    WideMaskVT = MVT::v2f64;
+    break;
+  case MVT::v4i64:
+    WideMaskVT = MVT::v4f64;
+    break;
+  case MVT::v8i32:
+    WideMaskVT = MVT::v8f32;
+    break;
+  // There is no MOVMSK operation for vectors of i16, but we can shuffle the
+  // setcc's result into a vector of i8's. See below.
+  case MVT::v8i16:
+    WideMaskVT = MVT::v16i8;
+    break;
+  case MVT::v16i16:
+    WideMaskVT = MVT::v32i8;
+    break;
+  }
+
+  assert(WideMaskVT != MVT::INVALID_SIMPLE_VALUE_TYPE &&
+         "WideMaskVT was not set");
+
+  EVT SetCCVT = EVT(InVT).changeVectorElementTypeToInteger();
+  SDValue V = DAG.getSetCC(SDLoc(SetCC), SetCCVT, SetCC->getOperand(0),
+                           SetCC->getOperand(1),
+                           cast<CondCodeSDNode>(SetCC->getOperand(2))->get());
+
+  if (SetCCVT != WideMaskVT)
+    V = DAG.getBitcast(WideMaskVT, V);
+
+  // There are MOVMSK flavors for vectors with elements of i8, f32 and f64, so
+  // we have all 128-bit and 256-bit vectors covered except for v8i16 and
+  // v16i16. For these two cases we can shuffle the low byte of each element
+  // into a consecutive sequence at the start of the vector.
+  // For example, t0 := (v8i16 setcc (v8i16 v1, v2, gt)) needs to be
+  // shuffled as:
+  // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
+  if (InVT == MVT::v8i16 || InVT == MVT::v16i16) {
+    SmallVector<int, 32> Mask(WideMaskVT.getVectorNumElements(), -1);
+    for (unsigned i = 0, e = InVT.getVectorNumElements(); i != e; ++i)
+      Mask[i] = 2 * i;
+    V = DAG.getBitcast(WideMaskVT, V);
+    V = DAG.getVectorShuffle(WideMaskVT, DL, V, DAG.getUNDEF(WideMaskVT), Mask);
+  }
+  V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
+  return DAG.getZExtOrTrunc(V, DL, VT);
+}
+
 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
+                              TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
   EVT SrcVT = N0.getValueType();
 
+  // Try to match patterns such as
+  // (i16 bitcast (v16i1 setcc v16i8 v1, v2, gt))
+  // ->
+  // (movmsk (v16i8 setcc v16i8 v1, v2, gt))
+  // before the setcc result is scalarized on subtargets that don't have legal
+  // vxi1 types.
+  if (DCI.isBeforeLegalize() && N0.getOpcode() == ISD::SETCC)
+    if (SDValue V = combineBitcastOfSetCC(DAG, SDLoc(N), VT, N0, Subtarget))
+      return V;
+
   // Since MMX types are special and don't usually play with other vector types,
   // it's better to handle them early to be sure we emit efficient code by
   // avoiding store-load conversions.
@@ -35044,7 +35143,7 @@
   case ISD::VSELECT:
   case ISD::SELECT:
   case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
-  case ISD::BITCAST:        return combineBitcast(N, DAG, Subtarget);
+  case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
   case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
   case ISD::ADD:            return combineAdd(N, DAG, Subtarget);
   case ISD::SUB:            return combineSub(N, DAG, Subtarget);
Index: test/CodeGen/X86/bitcast-setcc-128.ll
===================================================================
--- test/CodeGen/X86/bitcast-setcc-128.ll
+++ test/CodeGen/X86/bitcast-setcc-128.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,SSE2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=CHECK,SSSE3
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSE2
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSSE3
 ; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,AVX1
 ; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX512
 
@@ -8,91 +8,26 @@
 ; SSE2-LABEL: v8i16:
 ; SSE2:       ## BB#0:
 ; SSE2-NEXT:    pcmpgtw %xmm1, %xmm0
-; SSE2-NEXT:    pextrw $7, %xmm0, %eax
-; SSE2-NEXT:    andl $1, %eax
-; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    pextrw $6, %xmm0, %eax
-; SSE2-NEXT:    andl $1, %eax
-; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    pextrw $5, %xmm0, %eax
-; SSE2-NEXT:    andl $1, %eax
-; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    pextrw $4, %xmm0, %eax
-; SSE2-NEXT:    andl $1, %eax
-; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    pextrw $3, %xmm0, %eax
-; SSE2-NEXT:    andl $1, %eax
-; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    pextrw $2, %xmm0, %eax
-; SSE2-NEXT:    andl $1, %eax
-; SSE2-NEXT:    movb %al,
-{{[0-9]+}}(%rsp) -; SSE2-NEXT: pextrw $1, %xmm0, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: packuswb %xmm0, %xmm0 +; SSE2-NEXT: pmovmskb %xmm0, %eax +; SSE2-NEXT: ## kill: %AL %AL %EAX ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v8i16: ; SSSE3: ## BB#0: ; SSSE3-NEXT: pcmpgtw %xmm1, %xmm0 -; SSSE3-NEXT: pextrw $7, %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pextrw $6, %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pextrw $5, %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pextrw $4, %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pextrw $3, %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pextrw $2, %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pextrw $1, %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; SSSE3-NEXT: pmovmskb %xmm0, %eax +; SSSE3-NEXT: ## kill: %AL %AL %EAX ; SSSE3-NEXT: retq ; ; AVX1-LABEL: v8i16: ; AVX1: ## BB#0: ; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpextrw $7, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrw $6, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrw $5, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrw $4, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrw $3, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrw $2, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrw $1, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpmovmskb %xmm0, %eax +; AVX1-NEXT: ## kill: %AL %AL %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v8i16: @@ -107,64 +42,18 @@ } define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) { -; SSE2-LABEL: v4i32: -; SSE2: ## BB#0: -; SSE2-NEXT: pcmpgtd %xmm1, %xmm0 -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; SSE2-NEXT: movd %xmm1, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm1, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: retq -; -; SSSE3-LABEL: 
v4i32: -; SSSE3: ## BB#0: -; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; SSSE3-NEXT: movd %xmm1, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSSE3-NEXT: movd %xmm1, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: retq +; SSE2-SSSE3-LABEL: v4i32: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax +; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX +; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v4i32: ; AVX1: ## BB#0: ; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpextrd $3, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrd $2, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrd $1, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vmovmskps %xmm0, %eax +; AVX1-NEXT: ## kill: %AL %AL %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v4i32: @@ -180,66 +69,18 @@ } define i4 @v4f32(<4 x float> %a, <4 x float> %b) { -; SSE2-LABEL: v4f32: -; SSE2: ## BB#0: -; SSE2-NEXT: cmpltps %xmm0, %xmm1 -; SSE2-NEXT: movd %xmm1, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movaps %xmm1, %xmm0 -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSE2-NEXT: movd %xmm1, %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: retq -; -; SSSE3-LABEL: v4f32: -; SSSE3: ## BB#0: -; SSSE3-NEXT: cmpltps %xmm0, %xmm1 -; SSSE3-NEXT: movd %xmm1, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movaps %xmm1, %xmm0 -; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSSE3-NEXT: movd %xmm1, %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: retq +; SSE2-SSSE3-LABEL: v4f32: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax +; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX +; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v4f32: ; AVX1: ## BB#0: ; AVX1-NEXT: vcmpltps %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vextractps $3, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vextractps $2, %xmm0, %eax -; 
AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vextractps $1, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vextractps $0, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vmovmskps %xmm0, %eax +; AVX1-NEXT: ## kill: %AL %AL %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v4f32: @@ -255,168 +96,18 @@ } define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) { -; SSE2-LABEL: v16i8: -; SSE2: ## BB#0: -; SSE2-NEXT: pcmpgtb %xmm1, %xmm0 -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl -; SSE2-NEXT: andb $1, %cl -; SSE2-NEXT: movb %cl, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: retq -; -; SSSE3-LABEL: v16i8: -; SSSE3: ## BB#0: -; SSSE3-NEXT: pcmpgtb %xmm1, %xmm0 -; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb 
-{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl -; SSSE3-NEXT: andb $1, %cl -; SSSE3-NEXT: movb %cl, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: andb $1, %al -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: retq +; SSE2-SSSE3-LABEL: v16i8: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax +; SSE2-SSSE3-NEXT: ## kill: %AX %AX %EAX +; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v16i8: ; AVX1: ## BB#0: ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpextrb $15, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $14, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $13, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $12, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $11, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $10, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $9, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $8, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $7, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $6, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $5, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $4, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $3, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $2, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $1, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrb $0, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: vpmovmskb %xmm0, %eax +; AVX1-NEXT: ## kill: %AX %AX %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v16i8: @@ -431,62 +122,28 @@ } define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) { -; SSE2-LABEL: v2i64: -; SSE2: ## BB#0: -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0] -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pcmpgtd %xmm1, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2] -; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = 
xmm0[1,1,3,3] -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] -; SSE2-NEXT: por %xmm0, %xmm1 -; SSE2-NEXT: movq %xmm1, %rax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: retq -; -; SSSE3-LABEL: v2i64: -; SSSE3: ## BB#0: -; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0] -; SSSE3-NEXT: pxor %xmm2, %xmm1 -; SSSE3-NEXT: pxor %xmm2, %xmm0 -; SSSE3-NEXT: movdqa %xmm0, %xmm2 -; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2] -; SSSE3-NEXT: pcmpeqd %xmm1, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSSE3-NEXT: pand %xmm3, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] -; SSSE3-NEXT: por %xmm0, %xmm1 -; SSSE3-NEXT: movq %xmm1, %rax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSSE3-NEXT: movq %xmm0, %rax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: retq +; SSE2-SSSE3-LABEL: v2i64: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0] +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2] +; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax +; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX +; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2i64: ; AVX1: ## BB#0: ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpextrq $1, %xmm0, %rax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vmovq %xmm0, %rax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vmovmskpd %xmm0, %eax +; AVX1-NEXT: ## kill: %AL %AL %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v2i64: @@ -502,42 +159,18 @@ } define i2 @v2f64(<2 x double> %a, <2 x double> %b) { -; SSE2-LABEL: v2f64: -; SSE2: ## BB#0: -; SSE2-NEXT: cmpltpd %xmm0, %xmm1 -; SSE2-NEXT: movq %xmm1, %rax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: retq -; -; SSSE3-LABEL: v2f64: -; SSSE3: ## BB#0: -; SSSE3-NEXT: cmpltpd %xmm0, %xmm1 -; SSSE3-NEXT: movq %xmm1, %rax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSSE3-NEXT: movq %xmm0, %rax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSSE3-NEXT: retq +; SSE2-SSSE3-LABEL: v2f64: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax +; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX +; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2f64: ; AVX1: ## BB#0: ; AVX1-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0 -; 
AVX1-NEXT: vpextrq $1, %xmm0, %rax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vmovq %xmm0, %rax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vmovmskpd %xmm0, %eax +; AVX1-NEXT: ## kill: %AL %AL %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v2f64: Index: test/CodeGen/X86/bitcast-setcc-256.ll =================================================================== --- test/CodeGen/X86/bitcast-setcc-256.ll +++ test/CodeGen/X86/bitcast-setcc-256.ll @@ -7,56 +7,12 @@ ; AVX2: ## BB#0: ; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpextrb $15, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $14, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $13, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $12, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $11, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $10, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $9, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $8, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $7, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $6, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $5, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $4, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $3, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $2, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $1, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $0, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX2-NEXT: vpmovmskb %ymm0, %eax +; AVX2-NEXT: ## kill: %AX %AX %EAX ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -76,33 +32,8 @@ ; AVX2-LABEL: v8i32: ; AVX2: ## BB#0: ; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpextrw $7, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $6, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $5, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $4, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $3, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; 
AVX2-NEXT: vpextrw $2, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $1, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vmovmskps %ymm0, %eax +; AVX2-NEXT: ## kill: %AL %AL %EAX ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -122,33 +53,8 @@ ; AVX2-LABEL: v8f32: ; AVX2: ## BB#0: ; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpextrw $7, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $6, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $5, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $4, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $3, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $2, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrw $1, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vmovmskps %ymm0, %eax +; AVX2-NEXT: ## kill: %AL %AL %EAX ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -167,117 +73,8 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX2-LABEL: v32i8: ; AVX2: ## BB#0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: Lcfi0: -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: Lcfi1: -; AVX2-NEXT: .cfi_offset %rbp, -16 -; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: Lcfi2: -; AVX2-NEXT: .cfi_def_cfa_register %rbp -; AVX2-NEXT: andq $-32, %rsp -; AVX2-NEXT: subq $32, %rsp ; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpextrb $15, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $14, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $13, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $12, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $11, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $10, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $9, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $8, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $7, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $6, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $5, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $4, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $3, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $2, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $1, %xmm1, %eax -; 
AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $0, %xmm1, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $15, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $14, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $13, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $12, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $11, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $10, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $9, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $8, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $7, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $6, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $5, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $4, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $3, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $2, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $1, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: vpextrb $0, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: movb %al, (%rsp) -; AVX2-NEXT: movl (%rsp), %eax -; AVX2-NEXT: movq %rbp, %rsp -; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -296,21 +93,8 @@ ; AVX2-LABEL: v4i64: ; AVX2: ## BB#0: ; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpextrd $3, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrd $2, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrd $1, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vmovmskpd %ymm0, %eax +; AVX2-NEXT: ## kill: %AL %AL %EAX ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -331,21 +115,8 @@ ; AVX2-LABEL: v4f64: ; AVX2: ## BB#0: ; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpextrd $3, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrd $2, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrd $1, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vmovmskpd %ymm0, %eax +; AVX2-NEXT: ## kill: %AL %AL %EAX ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ;
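For reference, the IR shape this combine targets is exactly what the tests above exercise. A minimal sketch matching the v4i32 case (the function name below is illustrative, not part of the patch): the vector icmp produces a <4 x i1> mask, and its bitcast to i4 is now turned into an X86ISD::MOVMSK node before the mask would otherwise be scalarized.

define i4 @bitcast_of_setcc(<4 x i32> %a, <4 x i32> %b) {
  %cmp = icmp sgt <4 x i32> %a, %b
  %res = bitcast <4 x i1> %cmp to i4
  ret i4 %res
}

With the combine in place, the SSE2/AVX1 output shown in the updated checks is a single pcmpgtd/vpcmpgtd followed by movmskps/vmovmskps instead of the previous per-lane extract, mask, and store sequence.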