Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -28933,12 +28933,109 @@
                         EltNo);
 }
 
+// Try to match patterns such as
+// (i16 bitcast (v16i1 setgt v16i8 v1, v2))
+// ->
+// (i16 movmsk (v16i8 sext (v16i1 setgt v16i8 v1, v2)))
+// before the setcc result is scalarized on subtargets that don't have legal
+// vxi1 types.
+// TODO: Can we generalize this to more than just setcc?
+static SDValue combineBitcastOfSetCC(SelectionDAG &DAG, SDValue BitCast,
+                                     const X86Subtarget &Subtarget) {
+  SDValue N0 = BitCast.getOperand(0);
+  EVT VT = BitCast.getValueType();
+  EVT VecVT = N0.getValueType();
+
+  if (!VT.isScalarInteger() || VecVT.getScalarType() != MVT::i1 ||
+      N0.getOpcode() != ISD::SETCC)
+    return SDValue();
+
+  // With AVX512 vxi1 types are legal and we prefer using k-regs.
+  // MOVMSK is supported in SSE2 or later.
+  if (Subtarget.hasAVX512() || !Subtarget.hasSSE2())
+    return SDValue();
+
+  bool Is256BitCmp = false;
+  if (N0.getOperand(0).getValueSizeInBits() == 256) {
+    if (Subtarget.hasAVX2())
+      Is256BitCmp = true;
+    else
+      return SDValue();
+  }
+
+  // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v2f64 and
+  // v4f64.
+  // So all legal 128-bit and 256-bit vectors are covered except for v8i16 and
+  // v16i16.
+  // For these two cases we can shuffle the low byte of each element (all of a
+  // lane's bytes are equal after the sign extension) to a consecutive
+  // sequence at the start of the vector and treat the result as v16i8 or
+  // v32i8.
+  // For example, t0 := (v8i16 sext (v8i1 setgt v8i16 v1, v2)) needs to be
+  // shuffled as:
+  // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
+  MVT SExtVT;
+  MVT FPCastVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
+  switch (VecVT.getVectorNumElements()) {
+  default:
+    return SDValue();
+  case 2:
+    SExtVT = MVT::v2i64;
+    FPCastVT = MVT::v2f64;
+    break;
+  case 4:
+    SExtVT = Is256BitCmp ? MVT::v4i64 : MVT::v4i32;
+    FPCastVT = Is256BitCmp ? MVT::v4f64 : MVT::v4f32;
+    break;
+  case 8:
+    SExtVT = Is256BitCmp ? MVT::v8i32 : MVT::v8i16;
+    if (Is256BitCmp)
+      FPCastVT = MVT::v8f32;
+    break;
+  case 16:
+    SExtVT = Is256BitCmp ? MVT::v16i16 : MVT::v16i8;
+    break;
+  case 32:
+    SExtVT = MVT::v32i8;
+    break;
+  }
+
+  SDValue V = DAG.getSExtOrTrunc(N0, SDLoc(BitCast), SExtVT);
+  if (SExtVT == MVT::v8i16 || SExtVT == MVT::v16i16) {
+    MVT ShuffleVT = Is256BitCmp ? MVT::v32i8 : MVT::v16i8;
+    SmallVector<int, 32> Mask(ShuffleVT.getVectorNumElements(), -1);
+    for (int i = 0, e = VecVT.getVectorNumElements(); i != e; ++i)
+      Mask[i] = 2 * i;
+    V = DAG.getBitcast(ShuffleVT, V);
+    V = DAG.getVectorShuffle(ShuffleVT, SDLoc(BitCast), V,
+                             DAG.getUNDEF(ShuffleVT), Mask);
+  }
+  if (FPCastVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
+    V = DAG.getBitcast(FPCastVT, V);
+  V = DAG.getNode(X86ISD::MOVMSK, SDLoc(BitCast), MVT::i32, V);
+  return DAG.getZExtOrTrunc(V, SDLoc(BitCast), VT);
+}
+
 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
+                              TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
   EVT SrcVT = N0.getValueType();
 
+  // Try to match patterns such as
+  // (i16 bitcast (v16i1 setcc v16i8 v1, v2, gt))
+  // ->
+  // (i16 movmsk (v16i8 sext (v16i1 setgt v16i8 v1, v2)))
+  // before the setcc result is scalarized on subtargets that don't have legal
+  // vxi1 types.
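+  // For example (illustrative IR, mirroring the v16i8 test below):
+  //   %cmp = icmp sgt <16 x i8> %a, %b
+  //   %res = bitcast <16 x i1> %cmp to i16
+  // now selects to a vector compare plus a single PMOVMSKB instead of
+  // being scalarized through the stack.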
+  if (DCI.isBeforeLegalize())
+    if (SDValue V = combineBitcastOfSetCC(DAG, SDValue(N, 0), Subtarget))
+      return V;
   // Since MMX types are special and don't usually play with other vector types,
   // it's better to handle them early to be sure we emit efficient code by
   // avoiding store-load conversions.
@@ -35041,7 +35133,7 @@
   case ISD::VSELECT:
   case ISD::SELECT:
   case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
-  case ISD::BITCAST: return combineBitcast(N, DAG, Subtarget);
+  case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
   case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
   case ISD::ADD: return combineAdd(N, DAG, Subtarget);
   case ISD::SUB: return combineSub(N, DAG, Subtarget);
Index: test/CodeGen/X86/bitcast-setcc-128.ll
===================================================================
--- test/CodeGen/X86/bitcast-setcc-128.ll
+++ test/CodeGen/X86/bitcast-setcc-128.ll
@@ -5,64 +5,29 @@
 ; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX512
 
 define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
-; SSE2-SSSE3-LABEL: v8i16:
-; SSE2-SSSE3: ## BB#0:
-; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $6, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $5, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $3, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $2, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movd %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: retq
+; SSE2-LABEL: v8i16:
+; SSE2: ## BB#0:
+; SSE2-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: v8i16:
+; SSSE3: ## BB#0:
+; SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pmovmskb %xmm0, %eax
+; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v8i16:
 ; AVX1: ## BB#0:
 ; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrw $7, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $6, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $4, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $3, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $2, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $1, %xmm0, %eax
%eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpmovmskb %xmm0, %eax +; AVX1-NEXT: ## kill: %AL %AL %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v8i16: @@ -80,40 +45,15 @@ ; SSE2-SSSE3-LABEL: v4i32: ; SSE2-SSSE3: ## BB#0: ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: movd %xmm0, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; SSE2-SSSE3-NEXT: movd %xmm1, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE2-SSSE3-NEXT: movd %xmm1, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] -; SSE2-SSSE3-NEXT: movd %xmm0, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax +; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v4i32: ; AVX1: ## BB#0: ; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpextrd $3, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrd $2, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vpextrd $1, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vmovmskps %xmm0, %eax +; AVX1-NEXT: ## kill: %AL %AL %EAX ; AVX1-NEXT: retq ; ; AVX512-LABEL: v4i32: @@ -132,41 +72,15 @@ ; SSE2-SSSE3-LABEL: v4f32: ; SSE2-SSSE3: ## BB#0: ; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1 -; SSE2-SSSE3-NEXT: movd %xmm1, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movaps %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; SSE2-SSSE3-NEXT: movd %xmm0, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-SSSE3-NEXT: movd %xmm0, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSE2-SSSE3-NEXT: movd %xmm1, %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax +; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v4f32: ; AVX1: ## BB#0: ; AVX1-NEXT: vcmpltps %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vextractps $3, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vextractps $2, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vextractps $1, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: vextractps $0, %xmm0, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: vmovmskps %xmm0, %eax +; 
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v4f32:
@@ -185,110 +99,15 @@
 ; SSE2-SSSE3-LABEL: v16i8:
 ; SSE2-SSSE3: ## BB#0:
 ; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
-; SSE2-SSSE3-NEXT: andb $1, %cl
-; SSE2-SSSE3-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: andb $1, %al
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v16i8:
 ; AVX1: ## BB#0:
 ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $15, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $14, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $13, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $12, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $11, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $10, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $9, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $8, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $7, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $6, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $5, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $4, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $3, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $2, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $1, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: andb $1, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v16i8:
@@ -330,14 +149,8 @@
 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
 ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
-; SSE2-SSSE3-NEXT: movq %xmm1, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-SSSE3-NEXT: movq %xmm0, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2i8:
@@ -353,13 +166,8 @@
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vmovmskpd %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v2i8:
@@ -406,14 +214,8 @@
 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
 ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
-; SSE2-SSSE3-NEXT: movq %xmm1, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-SSSE3-NEXT: movq %xmm0, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2i16:
@@ -429,13 +231,8 @@
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vmovmskpd %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v2i16:
@@ -478,14 +275,8 @@
 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
-; SSE2-SSSE3-NEXT: movq %xmm1, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-SSSE3-NEXT: movq %xmm0, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2i32:
@@ -499,13 +290,8 @@
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vmovmskpd %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v2i32:
@@ -538,26 +324,15 @@
 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
 ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
-; SSE2-SSSE3-NEXT: movq %xmm1, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-SSSE3-NEXT: movq %xmm0, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2i64:
 ; AVX1: ## BB#0:
 ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vmovmskpd %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v2i64:
@@ -576,26 +351,15 @@
 ; SSE2-SSSE3-LABEL: v2f64:
 ; SSE2-SSSE3: ## BB#0:
 ; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
-; SSE2-SSSE3-NEXT: movq %xmm1, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-SSSE3-NEXT: movq %xmm0, %rax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2f64:
 ; AVX1: ## BB#0:
 ; AVX1-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vmovmskpd %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v2f64:
@@ -618,22 +382,8 @@
 ; SSE2-SSSE3-NEXT: pslld $24, %xmm0
 ; SSE2-SSSE3-NEXT: psrad $24, %xmm0
 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: movd %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-SSSE3-NEXT: movd %xmm1, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-SSSE3-NEXT: movd %xmm1, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-SSSE3-NEXT: movd %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v4i8:
@@ -643,19 +393,8 @@
 ; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vmovmskps %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v4i8:
@@ -682,22 +421,8 @@
 ; SSE2-SSSE3-NEXT: pslld $16, %xmm0
 ; SSE2-SSSE3-NEXT: psrad $16, %xmm0
 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: movd %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-SSSE3-NEXT: movd %xmm1, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-SSSE3-NEXT: movd %xmm1, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-SSSE3-NEXT: movd %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
+; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v4i16:
@@ -707,19 +432,8 @@
 ; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vmovmskps %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v4i16:
@@ -739,39 +453,30 @@
 }
 
 define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
-; SSE2-SSSE3-LABEL: v8i8:
-; SSE2-SSSE3: ## BB#0:
-; SSE2-SSSE3-NEXT: psllw $8, %xmm1
-; SSE2-SSSE3-NEXT: psraw $8, %xmm1
-; SSE2-SSSE3-NEXT: psllw $8, %xmm0
-; SSE2-SSSE3-NEXT: psraw $8, %xmm0
-; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $6, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $5, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $3, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $2, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movd %xmm0, %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-SSSE3-NEXT: retq
+; SSE2-LABEL: v8i8:
+; SSE2: ## BB#0:
+; SSE2-NEXT: psllw $8, %xmm1
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: psllw $8, %xmm0
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: v8i8:
+; SSSE3: ## BB#0:
+; SSSE3-NEXT: psllw $8, %xmm1
+; SSSE3-NEXT: psraw $8, %xmm1
+; SSSE3-NEXT: psllw $8, %xmm0
+; SSSE3-NEXT: psraw $8, %xmm0
+; SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pmovmskb %xmm0, %eax
+; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v8i8:
 ; AVX1: ## BB#0:
@@ -780,31 +485,9 @@
 ; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrw $7, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $6, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $4, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $3, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $2, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vpextrw $1, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT: retq
 ;
 ; AVX512-LABEL: v8i8:
Index: test/CodeGen/X86/bitcast-setcc-256.ll
===================================================================
--- test/CodeGen/X86/bitcast-setcc-256.ll
+++ test/CodeGen/X86/bitcast-setcc-256.ll
@@ -7,56 +7,12 @@
 ; AVX2: ## BB#0:
 ; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $15, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $14, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $13, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $12, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $11, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $10, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $9, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $8, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $7, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $6, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $5, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $4, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $3, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $2, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -76,33 +32,8 @@
 ; AVX2-LABEL: v8i32:
 ; AVX2: ## BB#0:
 ; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $7, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $6, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $4, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $3, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $2, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $1, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -122,33 +53,8 @@
 ; AVX2-LABEL: v8f32:
 ; AVX2: ## BB#0:
 ; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $7, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $6, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $4, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $3, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $2, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrw $1, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -167,117 +73,8 @@
 define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
 ; AVX2-LABEL: v32i8:
 ; AVX2: ## BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: Lcfi0:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: Lcfi1:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: Lcfi2:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $32, %rsp
 ; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpextrb $15, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $14, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $13, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $12, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $11, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $10, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $9, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $8, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $7, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $6, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $5, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $4, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $3, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $2, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $1, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $0, %xmm1, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $15, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $14, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $13, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $12, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $11, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $10, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $9, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $8, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $7, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $6, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $5, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $4, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $3, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $2, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: andb $1, %al
-; AVX2-NEXT: movb %al, (%rsp)
-; AVX2-NEXT: movl (%rsp), %eax
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -296,21 +93,8 @@
 ; AVX2-LABEL: v4i64:
 ; AVX2: ## BB#0:
 ; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrd $3, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrd $2, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrd $1, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vmovmskpd %ymm0, %eax
+; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -331,21 +115,8 @@
 ; AVX2-LABEL: v4f64:
 ; AVX2: ## BB#0:
 ; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrd $3, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrd $2, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vpextrd $1, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vmovmskpd %ymm0, %eax
+; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;