Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -34329,6 +34329,87 @@
   return R.getValue(1);
 }
 
+// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
+// This is more or less the reverse of combineBitcastvxi1.
+static SDValue
+combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
+                               TargetLowering::DAGCombinerInfo &DCI,
+                               const X86Subtarget &Subtarget) {
+  unsigned Opcode = N->getOpcode();
+  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
+      Opcode != ISD::ANY_EXTEND)
+    return SDValue();
+  if (!DCI.isBeforeLegalizeOps())
+    return SDValue();
+  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
+    return SDValue();
+
+  SDValue N0 = N->getOperand(0);
+  EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
+  EVT InSVT = N0.getValueType().getScalarType();
+  unsigned EltSizeInBits = SVT.getSizeInBits();
+
+  // Input type must be extending a bool vector (bit-casted from a scalar
+  // integer) to legal integer types.
+  if (!VT.isVector())
+    return SDValue();
+  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
+    return SDValue();
+  if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
+    return SDValue();
+
+  SDValue N00 = N0.getOperand(0);
+  EVT SclVT = N0.getOperand(0).getValueType();
+  if (!SclVT.isScalarInteger())
+    return SDValue();
+
+  SDLoc DL(N);
+  SDValue Vec;
+  SmallVector<int, 32> ShuffleMask;
+  unsigned NumElts = VT.getVectorNumElements();
+
+  // Broadcast the scalar integer to the vector elements.
+  if (SclVT.getSizeInBits() > EltSizeInBits) {
+    // If the scalar integer is greater than the vector element size, then we
+    // must split it down into sub-sections for broadcasting. For example:
+    //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
+    //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
+    assert((SclVT.getSizeInBits() % EltSizeInBits) == 0 &&
+           "Unexpected integer scale");
+    unsigned Scale = SclVT.getSizeInBits() / EltSizeInBits;
+    EVT BroadcastVT =
+        EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts / Scale);
+    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
+    Vec = DAG.getBitcast(VT, Vec);
+
+    for (unsigned i = 0; i != Scale; ++i)
+      ShuffleMask.append(EltSizeInBits, i);
+  } else {
+    // For smaller scalar integers, we can simply any-extend it to the vector
+    // element size (we don't care about the upper bits) and broadcast it to
+    // all elements.
+    SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
+    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
+    ShuffleMask.append(NumElts, 0);
+  }
+  Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
+
+  // Now, shift the relevant bit up to the MSB.
+  SmallVector<SDValue, 32> ShiftToSign;
+  for (unsigned i = 0; i != NumElts; ++i) {
+    int ShiftAmt = EltSizeInBits - ((i % EltSizeInBits) + 1);
+    ShiftToSign.push_back(DAG.getConstant(ShiftAmt, DL, SVT));
+  }
+  Vec = DAG.getNode(ISD::SHL, DL, VT, Vec,
+                    DAG.getBuildVector(VT, DL, ShiftToSign));
+
+  // Finally, splat the sign bit for SEXT, else shift it down to zero extend it.
+  Vec = DAG.getNode(Opcode == ISD::SIGN_EXTEND ? 
ISD::SRA : ISD::SRL, DL, VT, + Vec, DAG.getConstant(EltSizeInBits - 1, DL, VT)); + return Vec; +} + /// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or /// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating /// with UNDEFs) of the input to vectors of the same size as the target type @@ -34465,6 +34546,9 @@ if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget)) return V; + if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget)) + return V; + if (Subtarget.hasAVX() && VT.is256BitVector()) if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget)) return R; @@ -34586,6 +34670,9 @@ if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget)) return V; + if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget)) + return V; + if (VT.is256BitVector()) if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget)) return R; Index: test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll =================================================================== --- test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll +++ test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll @@ -12,33 +12,38 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) { ; SSE2-SSSE3-LABEL: ext_i2_2i64: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: andb $3, %dil -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $62, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movq %rcx, %xmm1 -; SSE2-SSSE3-NEXT: shlq $63, %rax -; SSE2-SSSE3-NEXT: sarq $63, %rax -; SSE2-SSSE3-NEXT: movq %rax, %xmm0 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: # kill: %EDI %EDI %RDI +; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] +; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: psllq $62, %xmm1 +; SSE2-SSSE3-NEXT: psllq $63, %xmm0 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] +; SSE2-SSSE3-NEXT: psrad $31, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] ; SSE2-SSSE3-NEXT: retq ; -; AVX12-LABEL: ext_i2_2i64: -; AVX12: # BB#0: -; AVX12-NEXT: andb $3, %dil -; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $62, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vmovq %rcx, %xmm0 -; AVX12-NEXT: shlq $63, %rax -; AVX12-NEXT: sarq $63, %rax -; AVX12-NEXT: vmovq %rax, %xmm1 -; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX12-NEXT: retq +; AVX1-LABEL: ext_i2_2i64: +; AVX1: # BB#0: +; AVX1-NEXT: # kill: %EDI %EDI %RDI +; AVX1-NEXT: vmovq %rdi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] +; AVX1-NEXT: vpsllq $62, %xmm0, %xmm1 +; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i2_2i64: +; AVX2: # BB#0: +; AVX2-NEXT: # kill: %EDI %EDI %RDI +; AVX2-NEXT: vmovq %rdi, %xmm0 +; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i2_2i64: ; AVX512: # BB#0: @@ -58,50 +63,33 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) { ; SSE2-SSSE3-LABEL: ext_i4_4i32: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: andb $15, %dil -; SSE2-SSSE3-NEXT: 
movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $60, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $61, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $62, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shlq $63, %rax -; SSE2-SSSE3-NEXT: sarq $63, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm0 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,1073741824,536870912,268435456] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: movd %edi, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-SSSE3-NEXT: psrad $31, %xmm0 ; SSE2-SSSE3-NEXT: retq ; -; AVX12-LABEL: ext_i4_4i32: -; AVX12: # BB#0: -; AVX12-NEXT: andb $15, %dil -; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $62, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: movq %rax, %rdx -; AVX12-NEXT: shlq $63, %rdx -; AVX12-NEXT: sarq $63, %rdx -; AVX12-NEXT: vmovd %edx, %xmm0 -; AVX12-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $61, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: shlq $60, %rax -; AVX12-NEXT: sarq $63, %rax -; AVX12-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 -; AVX12-NEXT: retq +; AVX1-LABEL: ext_i4_4i32: +; AVX1: # BB#0: +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i4_4i32: +; AVX2: # BB#0: +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0 +; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i4_4i32: ; AVX512: # BB#0: @@ -122,82 +110,29 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) { ; SSE2-SSSE3-LABEL: ext_i8_8i16: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shrq $7, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $57, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $58, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $59, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = 
xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $60, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $61, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shlq $62, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: shlq $63, %rax -; SSE2-SSSE3-NEXT: sarq $63, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm0 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: movd %edi, %xmm0 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-SSSE3-NEXT: pmullw {{.*}}(%rip), %xmm0 +; SSE2-SSSE3-NEXT: psraw $15, %xmm0 ; SSE2-SSSE3-NEXT: retq ; -; AVX12-LABEL: ext_i8_8i16: -; AVX12: # BB#0: -; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX12-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $62, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: movq %rax, %rdx -; AVX12-NEXT: shlq $63, %rdx -; AVX12-NEXT: sarq $63, %rdx -; AVX12-NEXT: vmovd %edx, %xmm0 -; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $61, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $60, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $59, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $58, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $57, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: shrq $7, %rax -; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 -; AVX12-NEXT: retq +; AVX1-LABEL: ext_i8_8i16: +; AVX1: # BB#0: +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i8_8i16: +; AVX2: # BB#0: +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0 +; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i8_8i16: ; AVX512: # BB#0: @@ -210,190 +145,93 @@ } define <16 x i8> @ext_i16_16i8(i16 %a0) { -; SSE2-SSSE3-LABEL: ext_i16_16i8: -; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: pushq %rbp -; SSE2-SSSE3-NEXT: .Lcfi0: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16 -; SSE2-SSSE3-NEXT: pushq %r15 -; SSE2-SSSE3-NEXT: .Lcfi1: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24 -; SSE2-SSSE3-NEXT: pushq %r14 -; SSE2-SSSE3-NEXT: .Lcfi2: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32 -; SSE2-SSSE3-NEXT: pushq %r13 -; SSE2-SSSE3-NEXT: .Lcfi3: -; 
SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40 -; SSE2-SSSE3-NEXT: pushq %r12 -; SSE2-SSSE3-NEXT: .Lcfi4: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48 -; SSE2-SSSE3-NEXT: pushq %rbx -; SSE2-SSSE3-NEXT: .Lcfi5: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56 -; SSE2-SSSE3-NEXT: .Lcfi6: -; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56 -; SSE2-SSSE3-NEXT: .Lcfi7: -; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48 -; SSE2-SSSE3-NEXT: .Lcfi8: -; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40 -; SSE2-SSSE3-NEXT: .Lcfi9: -; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32 -; SSE2-SSSE3-NEXT: .Lcfi10: -; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24 -; SSE2-SSSE3-NEXT: .Lcfi11: -; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16 -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rax -; SSE2-SSSE3-NEXT: movq %rax, %r8 -; SSE2-SSSE3-NEXT: movq %rax, %r9 -; SSE2-SSSE3-NEXT: movq %rax, %r10 -; SSE2-SSSE3-NEXT: movq %rax, %r11 -; SSE2-SSSE3-NEXT: movq %rax, %r14 -; SSE2-SSSE3-NEXT: movq %rax, %r15 -; SSE2-SSSE3-NEXT: movq %rax, %r12 -; SSE2-SSSE3-NEXT: movq %rax, %r13 -; SSE2-SSSE3-NEXT: movq %rax, %rbx -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: movq %rax, %rdx -; SSE2-SSSE3-NEXT: movq %rax, %rsi -; SSE2-SSSE3-NEXT: movq %rax, %rdi -; SSE2-SSSE3-NEXT: movq %rax, %rbp -; SSE2-SSSE3-NEXT: shrq $15, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm0 -; SSE2-SSSE3-NEXT: movq %rax, %rbp -; SSE2-SSSE3-NEXT: movsbq %al, %rax -; SSE2-SSSE3-NEXT: shlq $49, %r8 -; SSE2-SSSE3-NEXT: sarq $63, %r8 -; SSE2-SSSE3-NEXT: movd %r8d, %xmm1 -; SSE2-SSSE3-NEXT: shlq $50, %r9 -; SSE2-SSSE3-NEXT: sarq $63, %r9 -; SSE2-SSSE3-NEXT: movd %r9d, %xmm2 -; SSE2-SSSE3-NEXT: shlq $51, %r10 -; SSE2-SSSE3-NEXT: sarq $63, %r10 -; SSE2-SSSE3-NEXT: movd %r10d, %xmm3 -; SSE2-SSSE3-NEXT: shlq $52, %r11 -; SSE2-SSSE3-NEXT: sarq $63, %r11 -; SSE2-SSSE3-NEXT: movd %r11d, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: shlq $53, %r14 -; SSE2-SSSE3-NEXT: sarq $63, %r14 -; SSE2-SSSE3-NEXT: movd %r14d, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-SSSE3-NEXT: shlq $54, %r15 -; SSE2-SSSE3-NEXT: sarq $63, %r15 -; SSE2-SSSE3-NEXT: movd %r15d, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; SSE2-SSSE3-NEXT: shlq $55, %r12 -; SSE2-SSSE3-NEXT: sarq $63, %r12 -; SSE2-SSSE3-NEXT: movd %r12d, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-SSSE3-NEXT: shlq $60, %r13 -; SSE2-SSSE3-NEXT: sarq $63, %r13 -; SSE2-SSSE3-NEXT: movd %r13d, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-SSSE3-NEXT: shlq $61, %rbx -; SSE2-SSSE3-NEXT: sarq $63, %rbx -; SSE2-SSSE3-NEXT: movd %ebx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-SSSE3-NEXT: shlq $62, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; SSE2-SSSE3-NEXT: shlq $63, %rdx -; SSE2-SSSE3-NEXT: sarq $63, %rdx -; 
SSE2-SSSE3-NEXT: movd %edx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-SSSE3-NEXT: shlq $58, %rsi -; SSE2-SSSE3-NEXT: sarq $63, %rsi -; SSE2-SSSE3-NEXT: movd %esi, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] -; SSE2-SSSE3-NEXT: shlq $59, %rdi -; SSE2-SSSE3-NEXT: sarq $63, %rdi -; SSE2-SSSE3-NEXT: movd %edi, %xmm4 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; SSE2-SSSE3-NEXT: shlq $57, %rbp -; SSE2-SSSE3-NEXT: sarq $63, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm2 -; SSE2-SSSE3-NEXT: shrq $7, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE2-SSSE3-NEXT: popq %rbx -; SSE2-SSSE3-NEXT: popq %r12 -; SSE2-SSSE3-NEXT: popq %r13 -; SSE2-SSSE3-NEXT: popq %r14 -; SSE2-SSSE3-NEXT: popq %r15 -; SSE2-SSSE3-NEXT: popq %rbp -; SSE2-SSSE3-NEXT: retq +; SSE2-LABEL: ext_i16_16i8: +; SSE2: # BB#0: +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32] +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm3 +; SSE2-NEXT: movd %edi, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,1,1,4,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm1, %xmm4 +; SSE2-NEXT: psllw $4, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: paddb %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm1, %xmm4 +; SSE2-NEXT: psllw $2, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: paddb %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: pandn %xmm1, %xmm2 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: ext_i16_16i8: +; SSSE3: # BB#0: +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32] +; SSSE3-NEXT: pxor %xmm0, %xmm0 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtb %xmm2, %xmm3 +; SSSE3-NEXT: movd %edi, %xmm1 +; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1] +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pandn %xmm1, %xmm4 +; SSSE3-NEXT: psllw $4, %xmm1 +; SSSE3-NEXT: pand {{.*}}(%rip), %xmm1 +; SSSE3-NEXT: pand %xmm3, %xmm1 +; SSSE3-NEXT: por %xmm4, %xmm1 +; SSSE3-NEXT: paddb %xmm2, %xmm2 +; SSSE3-NEXT: pxor 
%xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtb %xmm2, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pandn %xmm1, %xmm4 +; SSSE3-NEXT: psllw $2, %xmm1 +; SSSE3-NEXT: pand {{.*}}(%rip), %xmm1 +; SSSE3-NEXT: pand %xmm3, %xmm1 +; SSSE3-NEXT: por %xmm4, %xmm1 +; SSSE3-NEXT: paddb %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtb %xmm2, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSSE3-NEXT: pandn %xmm1, %xmm2 +; SSSE3-NEXT: paddb %xmm1, %xmm1 +; SSSE3-NEXT: pand %xmm3, %xmm1 +; SSSE3-NEXT: por %xmm2, %xmm1 +; SSSE3-NEXT: pcmpgtb %xmm1, %xmm0 +; SSSE3-NEXT: retq ; ; AVX12-LABEL: ext_i16_16i8: ; AVX12: # BB#0: -; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX12-NEXT: movswq -{{[0-9]+}}(%rsp), %rax -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $62, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: movq %rax, %rdx -; AVX12-NEXT: shlq $63, %rdx -; AVX12-NEXT: sarq $63, %rdx -; AVX12-NEXT: vmovd %edx, %xmm0 -; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $61, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $60, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $59, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $58, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $57, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movsbq %al, %rcx -; AVX12-NEXT: shrq $7, %rcx -; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $55, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $54, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $53, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $52, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $51, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $50, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movq %rax, %rcx -; AVX12-NEXT: shlq $49, %rcx -; AVX12-NEXT: sarq $63, %rcx -; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: shrq $15, %rax -; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX12-NEXT: vmovd %edi, %xmm0 +; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1] +; AVX12-NEXT: vpsllw $4, %xmm0, %xmm1 +; AVX12-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX12-NEXT: vmovdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32] +; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpsllw $2, %xmm0, %xmm1 +; AVX12-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX12-NEXT: vpaddb %xmm2, %xmm2, %xmm2 +; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpaddb %xmm0, %xmm0, %xmm1 +; AVX12-NEXT: vpaddb %xmm2, %xmm2, %xmm2 +; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX12-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0 ; 
AVX12-NEXT: retq ; ; AVX512-LABEL: ext_i16_16i8: @@ -413,80 +251,49 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) { ; SSE2-SSSE3-LABEL: ext_i4_4i64: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: andb $15, %dil -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-SSSE3-NEXT: movd %eax, %xmm2 -; SSE2-SSSE3-NEXT: shrl %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm0 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3] -; SSE2-SSSE3-NEXT: psllq $63, %xmm0 +; SSE2-SSSE3-NEXT: # kill: %EDI %EDI %RDI +; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: psllq $62, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: psllq $63, %xmm2 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1] ; SSE2-SSSE3-NEXT: psrad $31, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3] -; SSE2-SSSE3-NEXT: psllq $63, %xmm1 -; SSE2-SSSE3-NEXT: psrad $31, %xmm1 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: psllq $60, %xmm2 +; SSE2-SSSE3-NEXT: psllq $61, %xmm1 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1] +; SSE2-SSSE3-NEXT: psrad $31, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i4_4i64: ; AVX1: # BB#0: -; AVX1-NEXT: andb $15, %dil -; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $60, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vmovq %rcx, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $61, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vmovq %rcx, %xmm1 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $62, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vmovq %rcx, %xmm1 -; AVX1-NEXT: shlq $63, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vmovq %rax, %xmm2 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: # kill: %EDI %EDI %RDI +; AVX1-NEXT: vmovq %rdi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] +; AVX1-NEXT: vpsllq $60, %xmm0, %xmm1 +; AVX1-NEXT: vpsllq $61, %xmm0, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpsllq $62, %xmm0, %xmm3 +; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i4_4i64: ; AVX2: # BB#0: -; AVX2-NEXT: andb $15, %dil -; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $60, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vmovq %rcx, 
%xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $61, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vmovq %rcx, %xmm1 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $62, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vmovq %rcx, %xmm1 -; AVX2-NEXT: shlq $63, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vmovq %rax, %xmm2 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: # kill: %EDI %EDI %RDI +; AVX2-NEXT: vmovq %rdi, %xmm0 +; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0 +; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i4_4i64: @@ -506,126 +313,43 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) { ; SSE2-SSSE3-LABEL: ext_i8_8i32: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: shrl $7, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm3 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] -; SSE2-SSSE3-NEXT: pslld $31, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,1073741824,536870912,268435456] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: movd %edi, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-SSSE3-NEXT: psrad $31, %xmm0 -; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: pslld 
$31, %xmm1 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [134217728,67108864,33554432,16777216] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm3 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; SSE2-SSSE3-NEXT: psrad $31, %xmm1 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i8_8i32: ; AVX1: # BB#0: -; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $58, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: movq %rax, %rdx -; AVX1-NEXT: shlq $59, %rdx -; AVX1-NEXT: sarq $63, %rdx -; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $57, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq $7, %rcx -; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $62, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: movq %rax, %rdx -; AVX1-NEXT: shlq $63, %rdx -; AVX1-NEXT: sarq $63, %rdx -; AVX1-NEXT: vmovd %edx, %xmm1 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $61, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $60, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i8_8i32: ; AVX2: # BB#0: -; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $58, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: movq %rax, %rdx -; AVX2-NEXT: shlq $59, %rdx -; AVX2-NEXT: sarq $63, %rdx -; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $57, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq $7, %rcx -; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $62, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: movq %rax, %rdx -; AVX2-NEXT: shlq $63, %rdx -; AVX2-NEXT: sarq $63, %rdx -; AVX2-NEXT: vmovd %edx, %xmm1 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $61, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $60, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0 +; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i8_8i32: @@ -642,300 +366,34 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) { ; SSE2-SSSE3-LABEL: ext_i16_16i16: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; 
SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-SSSE3-NEXT: movl 
%eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-SSSE3-NEXT: psllw $15, %xmm0 +; SSE2-SSSE3-NEXT: movd %edi, %xmm0 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [32768,16384,8192,4096,2048,1024,512,256] +; SSE2-SSSE3-NEXT: pmullw %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: psraw $15, %xmm0 -; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; SSE2-SSSE3-NEXT: psllw $15, %xmm1 +; SSE2-SSSE3-NEXT: pmullw {{.*}}(%rip), %xmm1 ; SSE2-SSSE3-NEXT: psraw $15, %xmm1 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i16_16i16: ; AVX1: # BB#0: -; AVX1-NEXT: pushq %rbp -; AVX1-NEXT: .Lcfi0: -; AVX1-NEXT: .cfi_def_cfa_offset 16 -; AVX1-NEXT: pushq %r15 -; AVX1-NEXT: .Lcfi1: -; AVX1-NEXT: .cfi_def_cfa_offset 24 -; AVX1-NEXT: pushq %r14 -; AVX1-NEXT: .Lcfi2: -; AVX1-NEXT: .cfi_def_cfa_offset 32 -; AVX1-NEXT: pushq %r13 -; AVX1-NEXT: .Lcfi3: -; AVX1-NEXT: .cfi_def_cfa_offset 40 -; AVX1-NEXT: pushq %r12 -; AVX1-NEXT: .Lcfi4: -; AVX1-NEXT: .cfi_def_cfa_offset 48 -; AVX1-NEXT: pushq %rbx -; AVX1-NEXT: .Lcfi5: -; AVX1-NEXT: .cfi_def_cfa_offset 56 -; AVX1-NEXT: .Lcfi6: -; AVX1-NEXT: .cfi_offset %rbx, -56 -; AVX1-NEXT: .Lcfi7: -; AVX1-NEXT: .cfi_offset %r12, -48 -; AVX1-NEXT: .Lcfi8: -; AVX1-NEXT: .cfi_offset %r13, -40 -; AVX1-NEXT: .Lcfi9: -; AVX1-NEXT: .cfi_offset %r14, -32 -; AVX1-NEXT: .Lcfi10: -; AVX1-NEXT: .cfi_offset %r15, -24 -; AVX1-NEXT: .Lcfi11: -; AVX1-NEXT: .cfi_offset %rbp, -16 -; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movswq -{{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $55, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vmovd %ecx, %xmm0 -; AVX1-NEXT: movq %rax, %r8 -; AVX1-NEXT: movq %rax, %r10 -; AVX1-NEXT: movq %rax, %r11 -; AVX1-NEXT: movq %rax, %r14 -; AVX1-NEXT: movq %rax, %r15 -; AVX1-NEXT: movq %rax, %r9 -; AVX1-NEXT: movq %rax, %r12 -; AVX1-NEXT: movq %rax, %r13 -; AVX1-NEXT: movq %rax, %rbx -; AVX1-NEXT: movq %rax, %rdi -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: movq %rax, %rdx -; AVX1-NEXT: movq %rax, %rsi -; AVX1-NEXT: movsbq %al, %rbp -; AVX1-NEXT: shlq $54, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 -; AVX1-NEXT: shlq $53, %r8 -; AVX1-NEXT: sarq $63, %r8 -; AVX1-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0 -; AVX1-NEXT: shlq $52, %r10 -; AVX1-NEXT: sarq $63, %r10 -; AVX1-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0 -; AVX1-NEXT: shlq $51, %r11 -; AVX1-NEXT: sarq $63, %r11 -; AVX1-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0 -; AVX1-NEXT: shlq $50, %r14 -; AVX1-NEXT: sarq $63, %r14 -; AVX1-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 -; AVX1-NEXT: shlq $49, %r15 -; AVX1-NEXT: sarq 
$63, %r15 -; AVX1-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0 -; AVX1-NEXT: shrq $15, %r9 -; AVX1-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0 -; AVX1-NEXT: shlq $63, %r13 -; AVX1-NEXT: sarq $63, %r13 -; AVX1-NEXT: vmovd %r13d, %xmm1 -; AVX1-NEXT: shlq $62, %r12 -; AVX1-NEXT: sarq $63, %r12 -; AVX1-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $61, %rbx -; AVX1-NEXT: sarq $63, %rbx -; AVX1-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $60, %rdi -; AVX1-NEXT: sarq $63, %rdi -; AVX1-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1 -; AVX1-NEXT: shlq $59, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $58, %rdx -; AVX1-NEXT: sarq $63, %rdx -; AVX1-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $57, %rsi -; AVX1-NEXT: sarq $63, %rsi -; AVX1-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1 -; AVX1-NEXT: shrq $7, %rbp -; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: popq %rbx -; AVX1-NEXT: popq %r12 -; AVX1-NEXT: popq %r13 -; AVX1-NEXT: popq %r14 -; AVX1-NEXT: popq %r15 -; AVX1-NEXT: popq %rbp ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i16_16i16: ; AVX2: # BB#0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .Lcfi0: -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: pushq %r15 -; AVX2-NEXT: .Lcfi1: -; AVX2-NEXT: .cfi_def_cfa_offset 24 -; AVX2-NEXT: pushq %r14 -; AVX2-NEXT: .Lcfi2: -; AVX2-NEXT: .cfi_def_cfa_offset 32 -; AVX2-NEXT: pushq %r13 -; AVX2-NEXT: .Lcfi3: -; AVX2-NEXT: .cfi_def_cfa_offset 40 -; AVX2-NEXT: pushq %r12 -; AVX2-NEXT: .Lcfi4: -; AVX2-NEXT: .cfi_def_cfa_offset 48 -; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: .Lcfi5: -; AVX2-NEXT: .cfi_def_cfa_offset 56 -; AVX2-NEXT: .Lcfi6: -; AVX2-NEXT: .cfi_offset %rbx, -56 -; AVX2-NEXT: .Lcfi7: -; AVX2-NEXT: .cfi_offset %r12, -48 -; AVX2-NEXT: .Lcfi8: -; AVX2-NEXT: .cfi_offset %r13, -40 -; AVX2-NEXT: .Lcfi9: -; AVX2-NEXT: .cfi_offset %r14, -32 -; AVX2-NEXT: .Lcfi10: -; AVX2-NEXT: .cfi_offset %r15, -24 -; AVX2-NEXT: .Lcfi11: -; AVX2-NEXT: .cfi_offset %rbp, -16 -; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movswq -{{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $55, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vmovd %ecx, %xmm0 -; AVX2-NEXT: movq %rax, %r8 -; AVX2-NEXT: movq %rax, %r10 -; AVX2-NEXT: movq %rax, %r11 -; AVX2-NEXT: movq %rax, %r14 -; AVX2-NEXT: movq %rax, %r15 -; AVX2-NEXT: movq %rax, %r9 -; AVX2-NEXT: movq %rax, %r12 -; AVX2-NEXT: movq %rax, %r13 -; AVX2-NEXT: movq %rax, %rbx -; AVX2-NEXT: movq %rax, %rdi -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: movq %rax, %rdx -; AVX2-NEXT: movq %rax, %rsi -; AVX2-NEXT: movsbq %al, %rbp -; AVX2-NEXT: shlq $54, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 -; AVX2-NEXT: shlq $53, %r8 -; AVX2-NEXT: sarq $63, %r8 -; AVX2-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0 -; AVX2-NEXT: shlq $52, %r10 -; AVX2-NEXT: sarq $63, %r10 -; AVX2-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0 -; AVX2-NEXT: shlq $51, %r11 -; AVX2-NEXT: sarq $63, %r11 -; AVX2-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0 -; AVX2-NEXT: shlq $50, %r14 -; AVX2-NEXT: sarq $63, %r14 -; AVX2-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 -; AVX2-NEXT: shlq $49, %r15 -; AVX2-NEXT: sarq $63, %r15 -; 
AVX2-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0 -; AVX2-NEXT: shrq $15, %r9 -; AVX2-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0 -; AVX2-NEXT: shlq $63, %r13 -; AVX2-NEXT: sarq $63, %r13 -; AVX2-NEXT: vmovd %r13d, %xmm1 -; AVX2-NEXT: shlq $62, %r12 -; AVX2-NEXT: sarq $63, %r12 -; AVX2-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $61, %rbx -; AVX2-NEXT: sarq $63, %rbx -; AVX2-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $60, %rdi -; AVX2-NEXT: sarq $63, %rdi -; AVX2-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1 -; AVX2-NEXT: shlq $59, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $58, %rdx -; AVX2-NEXT: sarq $63, %rdx -; AVX2-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $57, %rsi -; AVX2-NEXT: sarq $63, %rsi -; AVX2-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1 -; AVX2-NEXT: shrq $7, %rbp -; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX2-NEXT: popq %rbx -; AVX2-NEXT: popq %r12 -; AVX2-NEXT: popq %r13 -; AVX2-NEXT: popq %r14 -; AVX2-NEXT: popq %r15 -; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0 +; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i16_16i16: @@ -951,539 +409,116 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) { ; SSE2-SSSE3-LABEL: ext_i32_32i8: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: pushq %rbp -; SSE2-SSSE3-NEXT: .Lcfi12: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16 -; SSE2-SSSE3-NEXT: pushq %r15 -; SSE2-SSSE3-NEXT: .Lcfi13: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24 -; SSE2-SSSE3-NEXT: pushq %r14 -; SSE2-SSSE3-NEXT: .Lcfi14: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32 -; SSE2-SSSE3-NEXT: pushq %r13 -; SSE2-SSSE3-NEXT: .Lcfi15: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40 -; SSE2-SSSE3-NEXT: pushq %r12 -; SSE2-SSSE3-NEXT: .Lcfi16: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48 -; SSE2-SSSE3-NEXT: pushq %rbx -; SSE2-SSSE3-NEXT: .Lcfi17: -; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56 -; SSE2-SSSE3-NEXT: .Lcfi18: -; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56 -; SSE2-SSSE3-NEXT: .Lcfi19: -; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48 -; SSE2-SSSE3-NEXT: .Lcfi20: -; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40 -; SSE2-SSSE3-NEXT: .Lcfi21: -; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32 -; SSE2-SSSE3-NEXT: .Lcfi22: -; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24 -; SSE2-SSSE3-NEXT: .Lcfi23: -; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16 -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: shrl $16, %edi -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx -; SSE2-SSSE3-NEXT: movq %rbx, %r8 -; SSE2-SSSE3-NEXT: movq %rbx, %r9 -; SSE2-SSSE3-NEXT: movq %rbx, %r10 -; SSE2-SSSE3-NEXT: movq %rbx, %r11 -; SSE2-SSSE3-NEXT: movq %rbx, %r14 -; SSE2-SSSE3-NEXT: movq %rbx, %r15 -; SSE2-SSSE3-NEXT: movq %rbx, %r12 -; SSE2-SSSE3-NEXT: movq %rbx, %r13 -; SSE2-SSSE3-NEXT: movq %rbx, %rdi -; SSE2-SSSE3-NEXT: movq %rbx, %rcx -; SSE2-SSSE3-NEXT: movq %rbx, %rdx -; SSE2-SSSE3-NEXT: movq %rbx, %rbp -; SSE2-SSSE3-NEXT: movq %rbx, %rsi -; SSE2-SSSE3-NEXT: movq %rbx, %rax -; SSE2-SSSE3-NEXT: shrq $15, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm0 -; SSE2-SSSE3-NEXT: movq %rbx, %rax -; SSE2-SSSE3-NEXT: movsbq %bl, %rbx -; SSE2-SSSE3-NEXT: shlq $49, %r8 -; SSE2-SSSE3-NEXT: sarq $63, %r8 -; SSE2-SSSE3-NEXT: movd %r8d, %xmm15 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = 
xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7] -; SSE2-SSSE3-NEXT: shlq $50, %r9 -; SSE2-SSSE3-NEXT: sarq $63, %r9 -; SSE2-SSSE3-NEXT: movd %r9d, %xmm8 -; SSE2-SSSE3-NEXT: shlq $51, %r10 -; SSE2-SSSE3-NEXT: sarq $63, %r10 -; SSE2-SSSE3-NEXT: movd %r10d, %xmm3 -; SSE2-SSSE3-NEXT: shlq $52, %r11 -; SSE2-SSSE3-NEXT: sarq $63, %r11 -; SSE2-SSSE3-NEXT: movd %r11d, %xmm9 -; SSE2-SSSE3-NEXT: shlq $53, %r14 -; SSE2-SSSE3-NEXT: sarq $63, %r14 -; SSE2-SSSE3-NEXT: movd %r14d, %xmm6 -; SSE2-SSSE3-NEXT: shlq $54, %r15 -; SSE2-SSSE3-NEXT: sarq $63, %r15 -; SSE2-SSSE3-NEXT: movd %r15d, %xmm10 -; SSE2-SSSE3-NEXT: shlq $55, %r12 -; SSE2-SSSE3-NEXT: sarq $63, %r12 -; SSE2-SSSE3-NEXT: movd %r12d, %xmm1 -; SSE2-SSSE3-NEXT: shlq $60, %r13 -; SSE2-SSSE3-NEXT: sarq $63, %r13 -; SSE2-SSSE3-NEXT: movd %r13d, %xmm11 -; SSE2-SSSE3-NEXT: shlq $61, %rdi -; SSE2-SSSE3-NEXT: sarq $63, %rdi -; SSE2-SSSE3-NEXT: movd %edi, %xmm5 -; SSE2-SSSE3-NEXT: shlq $62, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm12 -; SSE2-SSSE3-NEXT: shlq $63, %rdx -; SSE2-SSSE3-NEXT: sarq $63, %rdx -; SSE2-SSSE3-NEXT: movd %edx, %xmm0 -; SSE2-SSSE3-NEXT: shlq $58, %rbp -; SSE2-SSSE3-NEXT: sarq $63, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm13 -; SSE2-SSSE3-NEXT: shlq $59, %rsi -; SSE2-SSSE3-NEXT: sarq $63, %rsi -; SSE2-SSSE3-NEXT: movd %esi, %xmm7 -; SSE2-SSSE3-NEXT: shlq $57, %rax -; SSE2-SSSE3-NEXT: sarq $63, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: shrq $7, %rbx -; SSE2-SSSE3-NEXT: movd %ebx, %xmm14 -; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi -; SSE2-SSSE3-NEXT: movq %rsi, %r8 -; SSE2-SSSE3-NEXT: movq %rsi, %r9 -; SSE2-SSSE3-NEXT: movq %rsi, %r10 -; SSE2-SSSE3-NEXT: movq %rsi, %r11 -; SSE2-SSSE3-NEXT: movq %rsi, %r14 -; SSE2-SSSE3-NEXT: movq %rsi, %r15 -; SSE2-SSSE3-NEXT: movq %rsi, %r12 -; SSE2-SSSE3-NEXT: movq %rsi, %r13 -; SSE2-SSSE3-NEXT: movq %rsi, %rbx -; SSE2-SSSE3-NEXT: movq %rsi, %rax -; SSE2-SSSE3-NEXT: movq %rsi, %rcx -; SSE2-SSSE3-NEXT: movq %rsi, %rdx -; SSE2-SSSE3-NEXT: movq %rsi, %rdi -; SSE2-SSSE3-NEXT: movq %rsi, %rbp -; SSE2-SSSE3-NEXT: shrq $15, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm2 -; SSE2-SSSE3-NEXT: movq %rsi, %rbp -; SSE2-SSSE3-NEXT: movsbq %sil, %rsi -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = 
xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm4
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm5
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm4
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm6
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-SSSE3-NEXT: shlq $61, %rbx
-; SSE2-SSSE3-NEXT: sarq $63, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: shlq $62, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-SSSE3-NEXT: shlq $63, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE2-SSSE3-NEXT: shlq $58, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-SSSE3-NEXT: shlq $59, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE2-SSSE3-NEXT: shlq $57, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm4
-; SSE2-SSSE3-NEXT: shrq $7, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE2-SSSE3-NEXT: popq %rbx
-; SSE2-SSSE3-NEXT: popq %r12
-; SSE2-SSSE3-NEXT: popq %r13
-; SSE2-SSSE3-NEXT: popq %r14
-; SSE2-SSSE3-NEXT: popq %r15
-; SSE2-SSSE3-NEXT: popq %rbp
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [49376,32928,16480,32,49376,32928,16480,32]
+; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: movd %edi, %xmm8
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,0,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: pandn %xmm6, %xmm3
+; SSE2-SSSE3-NEXT: psllw $4, %xmm6
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm5
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm6
+; SSE2-SSSE3-NEXT: por %xmm3, %xmm6
+; SSE2-SSSE3-NEXT: paddb %xmm0, %xmm0
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm4
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm0, %xmm4
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pandn %xmm6, %xmm3
+; SSE2-SSSE3-NEXT: psllw $2, %xmm6
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm7
+; SSE2-SSSE3-NEXT: pand %xmm7, %xmm6
+; SSE2-SSSE3-NEXT: por %xmm3, %xmm6
+; SSE2-SSSE3-NEXT: paddb %xmm0, %xmm0
+; SSE2-SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm0, %xmm3
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pandn %xmm6, %xmm0
+; SSE2-SSSE3-NEXT: paddb %xmm6, %xmm6
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm6
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm6
+; SSE2-SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm6, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,2,3,3,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,1,1]
+; SSE2-SSSE3-NEXT: pandn %xmm6, %xmm2
+; SSE2-SSSE3-NEXT: psllw $4, %xmm6
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm6
+; SSE2-SSSE3-NEXT: por %xmm2, %xmm6
+; SSE2-SSSE3-NEXT: pandn %xmm6, %xmm4
+; SSE2-SSSE3-NEXT: psllw $2, %xmm6
+; SSE2-SSSE3-NEXT: pand %xmm7, %xmm6
+; SSE2-SSSE3-NEXT: por %xmm4, %xmm6
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: pandn %xmm6, %xmm2
+; SSE2-SSSE3-NEXT: paddb %xmm6, %xmm6
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm6
+; SSE2-SSSE3-NEXT: por %xmm2, %xmm6
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm6, %xmm1
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: ext_i32_32i8:
 ; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi12:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi13:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi14:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: .Lcfi15:
-; AVX1-NEXT: .cfi_offset %rbx, -56
-; AVX1-NEXT: .Lcfi16:
-; AVX1-NEXT: .cfi_offset %r12, -48
-; AVX1-NEXT: .Lcfi17:
-; AVX1-NEXT: .cfi_offset %r13, -40
-; AVX1-NEXT: .Lcfi18:
-; AVX1-NEXT: .cfi_offset %r14, -32
-; AVX1-NEXT: .Lcfi19:
-; AVX1-NEXT: .cfi_offset %r15, -24
-; AVX1-NEXT: movl %edi, (%rsp)
-; AVX1-NEXT: movslq (%rsp), %rdx
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $47, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: movq %rdx, %r11
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $46, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shlq $45, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: shlq $44, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: shlq $43, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $42, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: shlq $41, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: shlq $40, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: shlq $39, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: shlq $38, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
-; AVX1-NEXT: movsbq %dl, %rax
-; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: shlq $37, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: shlq $36, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: shlq $35, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: shlq $34, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: shlq $33, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shrq $31, %rax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $63, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vmovd %r8d, %xmm1
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movswq %dx, %rdx
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; AVX1-NEXT: shlq $62, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $60, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $59, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $58, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $57, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; AVX1-NEXT: shrq $7, %rcx
-; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $55, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $54, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $53, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $52, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $51, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $50, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shlq $49, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
-; AVX1-NEXT: shrq $15, %rdx
-; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: leaq -40(%rbp), %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [49376,32928,16480,32,49376,32928,16480,32]
+; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
+; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: ext_i32_32i8:
 ; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi12:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi13:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi14:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: .Lcfi15:
-; AVX2-NEXT: .cfi_offset %rbx, -56
-; AVX2-NEXT: .Lcfi16:
-; AVX2-NEXT: .cfi_offset %r12, -48
-; AVX2-NEXT: .Lcfi17:
-; AVX2-NEXT: .cfi_offset %r13, -40
-; AVX2-NEXT: .Lcfi18:
-; AVX2-NEXT: .cfi_offset %r14, -32
-; AVX2-NEXT: .Lcfi19:
-; AVX2-NEXT: .cfi_offset %r15, -24
-; AVX2-NEXT: movl %edi, (%rsp)
-; AVX2-NEXT: movslq (%rsp), %rdx
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $47, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: movq %rdx, %r11
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $46, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shlq $45, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: shlq $44, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: shlq $43, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $42, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: shlq $41, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: shlq $40, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: shlq $39, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: shlq $38, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
-; AVX2-NEXT: movsbq %dl, %rax
-; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: shlq $37, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: shlq $36, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: shlq $35, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: shlq $34, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: shlq $33, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shrq $31, %rax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $63, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vmovd %r8d, %xmm1
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movswq %dx, %rdx
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; AVX2-NEXT: shlq $62, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $60, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $59, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $58, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $57, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq $7, %rcx
-; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $55, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $54, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $53, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $52, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $51, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $50, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shlq $49, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
-; AVX2-NEXT: shrq $15, %rdx
-; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: leaq -40(%rbp), %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32]
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: ext_i32_32i8:
@@ -1503,159 +538,74 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
 ; SSE2-SSSE3-LABEL: ext_i8_8i64:
 ; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm0
+; SSE2-SSSE3-NEXT: # kill: %EDI %EDI %RDI
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: psllq $62, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: psllq $63, %xmm1
+; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm0
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: psllq $60, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: psllq $61, %xmm2
+; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm1
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: psllq $58, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSE2-SSSE3-NEXT: psllq $59, %xmm4
+; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm2
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm3
-; SSE2-SSSE3-NEXT: psrad $31, %xmm3
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSE2-SSSE3-NEXT: psllq $56, %xmm4
+; SSE2-SSSE3-NEXT: psllq $57, %xmm3
+; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
+; SSE2-SSSE3-NEXT: psrad $31, %xmm4
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: ext_i8_8i64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: # kill: %EDI %EDI %RDI
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX1-NEXT: vpsllq $60, %xmm1, %xmm0
+; AVX1-NEXT: vpsllq $61, %xmm1, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpsllq $62, %xmm1, %xmm3
+; AVX1-NEXT: vpsllq $63, %xmm1, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vpsllq $56, %xmm1, %xmm3
+; AVX1-NEXT: vpsllq $57, %xmm1, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm3
+; AVX1-NEXT: vpsllq $58, %xmm1, %xmm4
+; AVX1-NEXT: vpsllq $59, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: ext_i8_8i64:
 ; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $4, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: # kill: %EDI %EDI %RDI
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
+; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: ext_i8_8i64:
@@ -1671,261 +621,65 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
 ; SSE2-SSSE3-LABEL: ext_i16_16i32:
 ; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,1073741824,536870912,268435456]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
+; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm0
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm1
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [134217728,67108864,33554432,16777216]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm1
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm2
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [8388608,4194304,2097152,1048576]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm4
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm2
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm3
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [524288,262144,131072,65536]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm5
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm3
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: ext_i16_16i32:
 ; AVX1: # BB#0:
-; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $7, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $9, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $10, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $11, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $12, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $13, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $14, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm0
 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm2
 ; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm2
+; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: ext_i16_16i32:
 ; AVX2: # BB#0:
-; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $4, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $7, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $9, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $10, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $11, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $12, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $13, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $14, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %ymm1
+; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm0
 ; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1
 ; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
@@ -1942,558 +696,57 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
 ; SSE2-SSSE3-LABEL: ext_i32_32i16:
 ; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movl %edi, %eax
-; SSE2-SSSE3-NEXT: shrl $16, %eax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm3
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [32768,16384,8192,4096,2048,1024,512,256]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm0
+; SSE2-SSSE3-NEXT: pmullw %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm0
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm1
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1]
+; SSE2-SSSE3-NEXT: pmullw %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm1
-; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm2
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE2-SSSE3-NEXT: pmullw %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm2
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm3
+; SSE2-SSSE3-NEXT: pmullw %xmm4, %xmm3
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm3
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: ext_i32_32i16:
 ; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi20:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi21:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi22:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $128, %rsp
-; AVX1-NEXT: .Lcfi23:
-; AVX1-NEXT: .cfi_offset %rbx, -56
-; AVX1-NEXT: .Lcfi24:
-; AVX1-NEXT: .cfi_offset %r12, -48
-; AVX1-NEXT: .Lcfi25:
-; AVX1-NEXT: .cfi_offset %r13, -40
-; AVX1-NEXT: .Lcfi26:
-; AVX1-NEXT: .cfi_offset %r14, -32
-; AVX1-NEXT: .Lcfi27:
-; AVX1-NEXT: .cfi_offset %r15, -24
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, %r13d
-; AVX1-NEXT: movl %edi, %r12d
-; AVX1-NEXT: movl %edi, %r15d
-; AVX1-NEXT: movl %edi, %r14d
-; AVX1-NEXT: movl %edi, %ebx
-; AVX1-NEXT: movl %edi, %r11d
-; AVX1-NEXT: movl %edi, %r10d
-; AVX1-NEXT: movl %edi, %r9d
-; AVX1-NEXT: movl %edi, %r8d
-; AVX1-NEXT: movl %edi, %esi
-; AVX1-NEXT: movl %edi, %edx
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vmovd %edi, %xmm0
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $3, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $4, %esi
-; AVX1-NEXT: andl $1, %esi
-; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
-; AVX1-NEXT: shrl $5, %r8d
-; AVX1-NEXT: andl $1, %r8d
-; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $6, %r9d
-; AVX1-NEXT: andl $1, %r9d
-; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $7, %r10d
-; AVX1-NEXT: andl $1, %r10d
-; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $8, %r11d
-; AVX1-NEXT: andl $1, %r11d
-; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $9, %ebx
-; AVX1-NEXT: andl $1, %ebx
-; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $10, %r14d
-; AVX1-NEXT: andl $1, %r14d
-; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $11, %r15d
-; AVX1-NEXT: andl $1, %r15d
-; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $12, %r12d
-; AVX1-NEXT: andl $1, %r12d
-; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $13, %r13d
-; AVX1-NEXT: andl $1, %r13d
-; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $16, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,16384,8192,4096,2048,1024,512,256]
+; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpsraw $15, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1]
+; AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: leaq -40(%rbp), %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: ext_i32_32i16:
 ; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi20:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi21:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi22:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $128, %rsp
-; AVX2-NEXT: .Lcfi23:
-; AVX2-NEXT: .cfi_offset %rbx, -56
-; AVX2-NEXT: .Lcfi24:
-; AVX2-NEXT: .cfi_offset %r12, -48
-; AVX2-NEXT: .Lcfi25:
-; AVX2-NEXT: .cfi_offset %r13, -40
-; AVX2-NEXT: .Lcfi26:
-; AVX2-NEXT: .cfi_offset %r14, -32
-; AVX2-NEXT: .Lcfi27:
-; AVX2-NEXT: .cfi_offset %r15, -24
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, %r13d
-; AVX2-NEXT: movl %edi, %r12d
-; AVX2-NEXT: movl %edi, %r15d
-; AVX2-NEXT: movl %edi, %r14d
-; AVX2-NEXT: movl %edi, %ebx
-; AVX2-NEXT: movl %edi, %r11d
-; AVX2-NEXT: movl %edi, %r10d
-; AVX2-NEXT: movl %edi, %r9d
-; AVX2-NEXT: movl %edi, %r8d
-; AVX2-NEXT: movl %edi, %esi
-; AVX2-NEXT: movl %edi, %edx
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: andl $1, %edi
 ; AVX2-NEXT: vmovd %edi, %xmm0
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $3, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $4, %esi
-; AVX2-NEXT: andl $1, %esi
-; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
-; AVX2-NEXT: shrl $5, %r8d
-; AVX2-NEXT: andl $1, %r8d
-; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $6, %r9d
-; AVX2-NEXT: andl $1, %r9d
-; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $7, %r10d
-; AVX2-NEXT: andl $1, %r10d
-; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $8, %r11d
-; AVX2-NEXT: andl $1, %r11d
-; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $9, %ebx
-; AVX2-NEXT: andl $1, %ebx
-; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $10, %r14d
-; AVX2-NEXT: andl $1, %r14d
-; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $11, %r15d
-; AVX2-NEXT: andl $1, %r15d
-; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $12, %r12d
-; AVX2-NEXT: andl $1, %r12d
-; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $13, %r13d
-; AVX2-NEXT: andl $1, %r13d
-; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $16, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1]
+; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpsllw $15, %ymm1, %ymm1
+; AVX2-NEXT: shrl $16, %edi
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT: vpsraw $15, %ymm1, %ymm1
-; AVX2-NEXT: leaq -40(%rbp), %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: ext_i32_32i16:
@@ -2509,967 +762,193 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
 ; SSE2-SSSE3-LABEL: ext_i64_64i8:
 ; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: pushq %rbp
-; SSE2-SSSE3-NEXT: .Lcfi24:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
-; SSE2-SSSE3-NEXT: pushq %r15
-; SSE2-SSSE3-NEXT: .Lcfi25:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
-; SSE2-SSSE3-NEXT: pushq %r14
-; SSE2-SSSE3-NEXT: .Lcfi26:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
-; SSE2-SSSE3-NEXT: pushq %r13
-; SSE2-SSSE3-NEXT: .Lcfi27:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
-; SSE2-SSSE3-NEXT: pushq %r12
-; SSE2-SSSE3-NEXT: .Lcfi28:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
-; SSE2-SSSE3-NEXT: pushq %rbx
-; SSE2-SSSE3-NEXT: .Lcfi29:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
-; SSE2-SSSE3-NEXT: .Lcfi30:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
-; SSE2-SSSE3-NEXT: .Lcfi31:
-; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
-; SSE2-SSSE3-NEXT: .Lcfi32:
-; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
-; SSE2-SSSE3-NEXT: .Lcfi33:
-; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
-; SSE2-SSSE3-NEXT: .Lcfi34:
-; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
-; SSE2-SSSE3-NEXT: .Lcfi35:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movq %rdi, %rax
-; SSE2-SSSE3-NEXT: shrq $32, %rax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movq %rdi, %rax
-; SSE2-SSSE3-NEXT: shrq $48, %rax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: shrl $16, %edi
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx
-; SSE2-SSSE3-NEXT: movq %rbx, %r8
-; SSE2-SSSE3-NEXT: movq %rbx, %r9
-; SSE2-SSSE3-NEXT: movq %rbx, %r10
-; SSE2-SSSE3-NEXT: movq %rbx, %r11
-; SSE2-SSSE3-NEXT: movq %rbx, %r14
-; SSE2-SSSE3-NEXT: movq %rbx, %r15
-; SSE2-SSSE3-NEXT: movq %rbx, %r12
-; SSE2-SSSE3-NEXT: movq %rbx, %r13
-; SSE2-SSSE3-NEXT: movq %rbx, %rdi
-; SSE2-SSSE3-NEXT: movq %rbx, %rcx
-; SSE2-SSSE3-NEXT: movq %rbx, %rdx
-; SSE2-SSSE3-NEXT: movq %rbx, %rsi
-; SSE2-SSSE3-NEXT: movq %rbx, %rbp
-; SSE2-SSSE3-NEXT: movq %rbx, %rax
-; SSE2-SSSE3-NEXT: shrq $15, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: movq %rbx, %rax
-; SSE2-SSSE3-NEXT: movsbq %bl, %rbx
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm15
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm8
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm2
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm9
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm6
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm10
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm11
-; SSE2-SSSE3-NEXT: shlq $61, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm5
-; SSE2-SSSE3-NEXT: shlq $62, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm12
-; SSE2-SSSE3-NEXT: shlq $63, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm0
-; SSE2-SSSE3-NEXT: shlq $58, %rsi
-; SSE2-SSSE3-NEXT: sarq $63, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm13
-; SSE2-SSSE3-NEXT: shlq $59, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm7
-; SSE2-SSSE3-NEXT: shlq $57, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: shrq $7, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm14
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
-; SSE2-SSSE3-NEXT: movq %rsi, %r8
-; SSE2-SSSE3-NEXT: movq %rsi, %r9
-; SSE2-SSSE3-NEXT: movq %rsi, %r10
-; SSE2-SSSE3-NEXT: movq %rsi, %r11
-; SSE2-SSSE3-NEXT: movq %rsi, %r14
-; SSE2-SSSE3-NEXT: movq %rsi, %r15
-; SSE2-SSSE3-NEXT: movq %rsi, %r12
-; SSE2-SSSE3-NEXT: movq %rsi, %r13
-; SSE2-SSSE3-NEXT: movq %rsi, %rbx
-; SSE2-SSSE3-NEXT: movq %rsi, %rax
-; SSE2-SSSE3-NEXT: movq %rsi, %rcx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdi
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: shrq $15, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm1
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0] -; SSE2-SSSE3-NEXT: shlq $49, %r8 -; SSE2-SSSE3-NEXT: sarq $63, %r8 -; SSE2-SSSE3-NEXT: movd %r8d, %xmm13 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7] -; SSE2-SSSE3-NEXT: shlq $50, %r9 -; SSE2-SSSE3-NEXT: sarq $63, %r9 -; SSE2-SSSE3-NEXT: movd %r9d, %xmm1 -; SSE2-SSSE3-NEXT: shlq $51, %r10 -; SSE2-SSSE3-NEXT: sarq $63, %r10 -; SSE2-SSSE3-NEXT: movd %r10d, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-SSSE3-NEXT: shlq $52, %r11 -; SSE2-SSSE3-NEXT: sarq $63, %r11 -; SSE2-SSSE3-NEXT: movd %r11d, %xmm8 -; SSE2-SSSE3-NEXT: shlq $53, %r14 -; SSE2-SSSE3-NEXT: sarq $63, %r14 -; SSE2-SSSE3-NEXT: movd %r14d, %xmm15 -; SSE2-SSSE3-NEXT: shlq $54, %r15 -; SSE2-SSSE3-NEXT: sarq $63, %r15 -; SSE2-SSSE3-NEXT: movd %r15d, %xmm9 -; SSE2-SSSE3-NEXT: shlq $55, %r12 -; SSE2-SSSE3-NEXT: sarq $63, %r12 -; SSE2-SSSE3-NEXT: movd %r12d, %xmm4 -; SSE2-SSSE3-NEXT: shlq $60, %r13 -; SSE2-SSSE3-NEXT: sarq $63, %r13 -; SSE2-SSSE3-NEXT: movd %r13d, %xmm10 -; SSE2-SSSE3-NEXT: shlq $61, %rbx -; SSE2-SSSE3-NEXT: sarq $63, %rbx -; SSE2-SSSE3-NEXT: movd %ebx, %xmm7 -; SSE2-SSSE3-NEXT: shlq $62, %rax -; SSE2-SSSE3-NEXT: sarq $63, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm11 -; SSE2-SSSE3-NEXT: shlq $63, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shlq $58, %rdx -; SSE2-SSSE3-NEXT: sarq $63, %rdx -; SSE2-SSSE3-NEXT: movd %edx, %xmm12 -; SSE2-SSSE3-NEXT: shlq $59, 
%rdi -; SSE2-SSSE3-NEXT: sarq $63, %rdi -; SSE2-SSSE3-NEXT: movd %edi, %xmm5 -; SSE2-SSSE3-NEXT: shlq $57, %rbp -; SSE2-SSSE3-NEXT: sarq $63, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm1 -; SSE2-SSSE3-NEXT: shrq $7, %rsi -; SSE2-SSSE3-NEXT: movd %esi, %xmm14 -; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi -; SSE2-SSSE3-NEXT: movq %rsi, %r8 -; SSE2-SSSE3-NEXT: movq %rsi, %r9 -; SSE2-SSSE3-NEXT: movq %rsi, %r10 -; SSE2-SSSE3-NEXT: movq %rsi, %r11 -; SSE2-SSSE3-NEXT: movq %rsi, %r14 -; SSE2-SSSE3-NEXT: movq %rsi, %r15 -; SSE2-SSSE3-NEXT: movq %rsi, %r12 -; SSE2-SSSE3-NEXT: movq %rsi, %r13 -; SSE2-SSSE3-NEXT: movq %rsi, %rbx -; SSE2-SSSE3-NEXT: movq %rsi, %rax -; SSE2-SSSE3-NEXT: movq %rsi, %rcx -; SSE2-SSSE3-NEXT: movq %rsi, %rdx -; SSE2-SSSE3-NEXT: movq %rsi, %rdi -; SSE2-SSSE3-NEXT: movq %rsi, %rbp -; SSE2-SSSE3-NEXT: shrq $15, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm6 -; SSE2-SSSE3-NEXT: movq %rsi, %rbp -; SSE2-SSSE3-NEXT: movsbq %sil, %rsi -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; SSE2-SSSE3-NEXT: shlq $49, %r8 -; SSE2-SSSE3-NEXT: sarq $63, %r8 -; SSE2-SSSE3-NEXT: movd %r8d, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] -; SSE2-SSSE3-NEXT: shlq $50, %r9 -; SSE2-SSSE3-NEXT: sarq $63, %r9 -; SSE2-SSSE3-NEXT: movd %r9d, %xmm3 -; SSE2-SSSE3-NEXT: shlq $51, %r10 -; SSE2-SSSE3-NEXT: sarq $63, %r10 -; SSE2-SSSE3-NEXT: movd %r10d, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] -; SSE2-SSSE3-NEXT: shlq $52, %r11 -; SSE2-SSSE3-NEXT: sarq $63, %r11 
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm8 -; SSE2-SSSE3-NEXT: shlq $53, %r14 -; SSE2-SSSE3-NEXT: sarq $63, %r14 -; SSE2-SSSE3-NEXT: movd %r14d, %xmm13 -; SSE2-SSSE3-NEXT: shlq $54, %r15 -; SSE2-SSSE3-NEXT: sarq $63, %r15 -; SSE2-SSSE3-NEXT: movd %r15d, %xmm9 -; SSE2-SSSE3-NEXT: shlq $55, %r12 -; SSE2-SSSE3-NEXT: sarq $63, %r12 -; SSE2-SSSE3-NEXT: movd %r12d, %xmm1 -; SSE2-SSSE3-NEXT: shlq $60, %r13 -; SSE2-SSSE3-NEXT: sarq $63, %r13 -; SSE2-SSSE3-NEXT: movd %r13d, %xmm10 -; SSE2-SSSE3-NEXT: shlq $61, %rbx -; SSE2-SSSE3-NEXT: sarq $63, %rbx -; SSE2-SSSE3-NEXT: movd %ebx, %xmm15 -; SSE2-SSSE3-NEXT: shlq $62, %rax -; SSE2-SSSE3-NEXT: sarq $63, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm11 -; SSE2-SSSE3-NEXT: shlq $63, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: shlq $58, %rdx -; SSE2-SSSE3-NEXT: sarq $63, %rdx -; SSE2-SSSE3-NEXT: movd %edx, %xmm12 -; SSE2-SSSE3-NEXT: shlq $59, %rdi -; SSE2-SSSE3-NEXT: sarq $63, %rdi -; SSE2-SSSE3-NEXT: movd %edi, %xmm5 -; SSE2-SSSE3-NEXT: shlq $57, %rbp -; SSE2-SSSE3-NEXT: sarq $63, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm6 -; SSE2-SSSE3-NEXT: shrq $7, %rsi -; SSE2-SSSE3-NEXT: movd %esi, %xmm14 -; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi -; SSE2-SSSE3-NEXT: movq %rsi, %r8 -; SSE2-SSSE3-NEXT: movq %rsi, %r9 -; SSE2-SSSE3-NEXT: movq %rsi, %r10 -; SSE2-SSSE3-NEXT: movq %rsi, %r11 -; SSE2-SSSE3-NEXT: movq %rsi, %r14 -; SSE2-SSSE3-NEXT: movq %rsi, %r15 -; SSE2-SSSE3-NEXT: movq %rsi, %r12 -; SSE2-SSSE3-NEXT: movq %rsi, %r13 -; SSE2-SSSE3-NEXT: movq %rsi, %rbx -; SSE2-SSSE3-NEXT: movq %rsi, %rax -; SSE2-SSSE3-NEXT: movq %rsi, %rcx -; SSE2-SSSE3-NEXT: movq %rsi, %rdx -; SSE2-SSSE3-NEXT: movq %rsi, %rdi -; SSE2-SSSE3-NEXT: movq %rsi, %rbp -; SSE2-SSSE3-NEXT: shrq $15, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm7 -; SSE2-SSSE3-NEXT: movq %rsi, %rbp -; SSE2-SSSE3-NEXT: movsbq %sil, %rsi -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1],xmm3[2],xmm11[2],xmm3[3],xmm11[3],xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3],xmm6[4],xmm14[4],xmm6[5],xmm14[5],xmm6[6],xmm14[6],xmm6[7],xmm14[7] -; SSE2-SSSE3-NEXT: shlq $49, %r8 -; SSE2-SSSE3-NEXT: sarq $63, %r8 -; SSE2-SSSE3-NEXT: movd %r8d, %xmm4 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3] -; SSE2-SSSE3-NEXT: shlq $50, %r9 -; 
SSE2-SSSE3-NEXT: sarq $63, %r9 -; SSE2-SSSE3-NEXT: movd %r9d, %xmm6 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] -; SSE2-SSSE3-NEXT: shlq $51, %r10 -; SSE2-SSSE3-NEXT: sarq $63, %r10 -; SSE2-SSSE3-NEXT: movd %r10d, %xmm5 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] -; SSE2-SSSE3-NEXT: shlq $52, %r11 -; SSE2-SSSE3-NEXT: sarq $63, %r11 -; SSE2-SSSE3-NEXT: movd %r11d, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7] -; SSE2-SSSE3-NEXT: shlq $53, %r14 -; SSE2-SSSE3-NEXT: sarq $63, %r14 -; SSE2-SSSE3-NEXT: movd %r14d, %xmm7 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] -; SSE2-SSSE3-NEXT: shlq $54, %r15 -; SSE2-SSSE3-NEXT: sarq $63, %r15 -; SSE2-SSSE3-NEXT: movd %r15d, %xmm6 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] -; SSE2-SSSE3-NEXT: shlq $55, %r12 -; SSE2-SSSE3-NEXT: sarq $63, %r12 -; SSE2-SSSE3-NEXT: movd %r12d, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7] -; SSE2-SSSE3-NEXT: shlq $60, %r13 -; SSE2-SSSE3-NEXT: sarq $63, %r13 -; SSE2-SSSE3-NEXT: movd %r13d, %xmm8 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7] -; SSE2-SSSE3-NEXT: shlq $61, %rbx -; SSE2-SSSE3-NEXT: sarq $63, %rbx -; SSE2-SSSE3-NEXT: movd %ebx, %xmm6 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3] -; SSE2-SSSE3-NEXT: shlq $62, %rax -; SSE2-SSSE3-NEXT: sarq $63, %rax -; SSE2-SSSE3-NEXT: movd %eax, %xmm7 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] -; SSE2-SSSE3-NEXT: shlq $63, %rcx -; SSE2-SSSE3-NEXT: sarq $63, %rcx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7] -; SSE2-SSSE3-NEXT: shlq $58, %rdx -; SSE2-SSSE3-NEXT: sarq $63, %rdx -; SSE2-SSSE3-NEXT: movd %edx, %xmm5 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7] -; SSE2-SSSE3-NEXT: shlq $59, %rdi -; SSE2-SSSE3-NEXT: sarq $63, %rdi -; SSE2-SSSE3-NEXT: movd %edi, %xmm7 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3] -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7] -; SSE2-SSSE3-NEXT: shlq $57, %rbp -; SSE2-SSSE3-NEXT: sarq $63, %rbp -; SSE2-SSSE3-NEXT: movd %ebp, %xmm5 -; SSE2-SSSE3-NEXT: shrq $7, %rsi -; SSE2-SSSE3-NEXT: movd %esi, %xmm6 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = 
xmm1[0],xmm7[0],xmm1[1],xmm7[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] -; SSE2-SSSE3-NEXT: popq %rbx -; SSE2-SSSE3-NEXT: popq %r12 -; SSE2-SSSE3-NEXT: popq %r13 -; SSE2-SSSE3-NEXT: popq %r14 -; SSE2-SSSE3-NEXT: popq %r15 -; SSE2-SSSE3-NEXT: popq %rbp +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [49376,32928,16480,32,49376,32928,16480,32] +; SSE2-SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm0, %xmm4 +; SSE2-SSSE3-NEXT: movq %rdi, %xmm8 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[0,0,1,1,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: psllw $4, %xmm1 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm9 +; SSE2-SSSE3-NEXT: pand %xmm9, %xmm1 +; SSE2-SSSE3-NEXT: por %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: paddb %xmm0, %xmm0 +; SSE2-SSSE3-NEXT: pxor %xmm6, %xmm6 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm0, %xmm6 +; SSE2-SSSE3-NEXT: movdqa %xmm6, %xmm2 +; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: psllw $2, %xmm1 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-SSSE3-NEXT: pand %xmm6, %xmm10 +; SSE2-SSSE3-NEXT: pand %xmm10, %xmm1 +; SSE2-SSSE3-NEXT: por %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: paddb %xmm0, %xmm0 +; SSE2-SSSE3-NEXT: pxor %xmm7, %xmm7 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm0, %xmm7 +; SSE2-SSSE3-NEXT: movdqa %xmm7, %xmm0 +; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: paddb %xmm1, %xmm1 +; SSE2-SSSE3-NEXT: pand %xmm7, %xmm1 +; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: pxor %xmm0, %xmm0 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[2,2,3,3,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,1] +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: pandn %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: psllw $4, %xmm2 +; SSE2-SSSE3-NEXT: pand %xmm9, %xmm2 +; SSE2-SSSE3-NEXT: por %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: movdqa %xmm6, %xmm1 +; SSE2-SSSE3-NEXT: pandn %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: psllw $2, %xmm2 +; SSE2-SSSE3-NEXT: pand %xmm10, %xmm2 +; SSE2-SSSE3-NEXT: por %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: movdqa %xmm7, %xmm1 +; SSE2-SSSE3-NEXT: pandn %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: paddb %xmm2, %xmm2 +; SSE2-SSSE3-NEXT: pand %xmm7, %xmm2 +; SSE2-SSSE3-NEXT: por %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm1 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,4,4,5,5] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,2,3,3] +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: pandn %xmm5, %xmm2 +; SSE2-SSSE3-NEXT: psllw $4, %xmm5 +; SSE2-SSSE3-NEXT: pand %xmm9, %xmm5 +; SSE2-SSSE3-NEXT: por %xmm2, %xmm5 +; SSE2-SSSE3-NEXT: movdqa %xmm6, %xmm2 +; SSE2-SSSE3-NEXT: pandn %xmm5, %xmm2 +; SSE2-SSSE3-NEXT: psllw $2, %xmm5 +; SSE2-SSSE3-NEXT: pand %xmm10, %xmm5 +; SSE2-SSSE3-NEXT: por %xmm2, %xmm5 +; SSE2-SSSE3-NEXT: movdqa %xmm7, %xmm2 +; SSE2-SSSE3-NEXT: pandn %xmm5, %xmm2 +; SSE2-SSSE3-NEXT: paddb %xmm5, %xmm5 +; SSE2-SSSE3-NEXT: pand %xmm7, %xmm5 +; SSE2-SSSE3-NEXT: por %xmm2, %xmm5 +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm5, %xmm2 +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm5 = xmm8[0,1,2,3,6,6,7,7] +; SSE2-SSSE3-NEXT: pshufd 
{{.*#+}} xmm5 = xmm5[2,2,3,3] +; SSE2-SSSE3-NEXT: pandn %xmm5, %xmm4 +; SSE2-SSSE3-NEXT: psllw $4, %xmm5 +; SSE2-SSSE3-NEXT: pand %xmm9, %xmm5 +; SSE2-SSSE3-NEXT: por %xmm4, %xmm5 +; SSE2-SSSE3-NEXT: pandn %xmm5, %xmm6 +; SSE2-SSSE3-NEXT: psllw $2, %xmm5 +; SSE2-SSSE3-NEXT: pand %xmm10, %xmm5 +; SSE2-SSSE3-NEXT: por %xmm6, %xmm5 +; SSE2-SSSE3-NEXT: movdqa %xmm7, %xmm4 +; SSE2-SSSE3-NEXT: pandn %xmm5, %xmm4 +; SSE2-SSSE3-NEXT: paddb %xmm5, %xmm5 +; SSE2-SSSE3-NEXT: pand %xmm7, %xmm5 +; SSE2-SSSE3-NEXT: por %xmm4, %xmm5 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm5, %xmm3 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i64_64i8: ; AVX1: # BB#0: -; AVX1-NEXT: pushq %rbp -; AVX1-NEXT: .Lcfi28: -; AVX1-NEXT: .cfi_def_cfa_offset 16 -; AVX1-NEXT: .Lcfi29: -; AVX1-NEXT: .cfi_offset %rbp, -16 -; AVX1-NEXT: movq %rsp, %rbp -; AVX1-NEXT: .Lcfi30: -; AVX1-NEXT: .cfi_def_cfa_register %rbp -; AVX1-NEXT: pushq %r15 -; AVX1-NEXT: pushq %r14 -; AVX1-NEXT: pushq %r13 -; AVX1-NEXT: pushq %r12 -; AVX1-NEXT: pushq %rbx -; AVX1-NEXT: andq $-32, %rsp -; AVX1-NEXT: subq $128, %rsp -; AVX1-NEXT: .Lcfi31: -; AVX1-NEXT: .cfi_offset %rbx, -56 -; AVX1-NEXT: .Lcfi32: -; AVX1-NEXT: .cfi_offset %r12, -48 -; AVX1-NEXT: .Lcfi33: -; AVX1-NEXT: .cfi_offset %r13, -40 -; AVX1-NEXT: .Lcfi34: -; AVX1-NEXT: .cfi_offset %r14, -32 -; AVX1-NEXT: .Lcfi35: -; AVX1-NEXT: .cfi_offset %r15, -24 -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) -; AVX1-NEXT: shrq $32, %rdi -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) -; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx -; AVX1-NEXT: movq %rdx, %rcx -; AVX1-NEXT: shlq $47, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vmovd %ecx, %xmm0 -; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: movq %rdx, %r8 -; AVX1-NEXT: movq %rdx, %rcx -; AVX1-NEXT: movq %rdx, %rdi -; AVX1-NEXT: movq %rdx, %r13 -; AVX1-NEXT: movq %rdx, %rsi -; AVX1-NEXT: movq %rdx, %r10 -; AVX1-NEXT: movq %rdx, %r11 -; AVX1-NEXT: movq %rdx, %r9 -; AVX1-NEXT: movq %rdx, %rbx -; AVX1-NEXT: movq %rdx, %r14 -; AVX1-NEXT: movq %rdx, %r15 -; AVX1-NEXT: movq %rdx, %r12 -; AVX1-NEXT: movq %rdx, %rax -; AVX1-NEXT: shlq $46, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX1-NEXT: shlq $45, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: shlq $44, %r8 -; AVX1-NEXT: sarq $63, %r8 -; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %r8 -; AVX1-NEXT: shlq $43, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %rcx -; AVX1-NEXT: shlq $42, %rdi -; AVX1-NEXT: sarq $63, %rdi -; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %rdi -; AVX1-NEXT: shlq $41, %r13 -; AVX1-NEXT: sarq $63, %r13 -; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %r13 -; AVX1-NEXT: shlq $40, %rsi -; AVX1-NEXT: sarq $63, %rsi -; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %rsi -; AVX1-NEXT: shlq $39, %r10 -; AVX1-NEXT: sarq $63, %r10 -; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %r10 -; AVX1-NEXT: shlq $38, %r11 -; AVX1-NEXT: sarq $63, %r11 -; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0 -; AVX1-NEXT: movsbq %dl, %rax -; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: shlq $37, %r9 -; AVX1-NEXT: sarq $63, %r9 -; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0 -; 
AVX1-NEXT: movq %rdx, %r9 -; AVX1-NEXT: shlq $36, %rbx -; AVX1-NEXT: sarq $63, %rbx -; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %rbx -; AVX1-NEXT: shlq $35, %r14 -; AVX1-NEXT: sarq $63, %r14 -; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %r14 -; AVX1-NEXT: shlq $34, %r15 -; AVX1-NEXT: sarq $63, %r15 -; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %r15 -; AVX1-NEXT: shlq $33, %r12 -; AVX1-NEXT: sarq $63, %r12 -; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %r12 -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX1-NEXT: shrq $31, %rax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movq %rdx, %rax -; AVX1-NEXT: shlq $63, %r8 -; AVX1-NEXT: sarq $63, %r8 -; AVX1-NEXT: vmovd %r8d, %xmm1 -; AVX1-NEXT: movq %rdx, %r8 -; AVX1-NEXT: movswq %dx, %rdx -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload -; AVX1-NEXT: shlq $62, %r11 -; AVX1-NEXT: sarq $63, %r11 -; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $61, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $60, %rdi -; AVX1-NEXT: sarq $63, %rdi -; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1 -; AVX1-NEXT: shlq $59, %r13 -; AVX1-NEXT: sarq $63, %r13 -; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $58, %rsi -; AVX1-NEXT: sarq $63, %rsi -; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1 -; AVX1-NEXT: shlq $57, %r10 -; AVX1-NEXT: sarq $63, %r10 -; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1 -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; AVX1-NEXT: shrq $7, %rcx -; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $55, %r9 -; AVX1-NEXT: sarq $63, %r9 -; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $54, %rbx -; AVX1-NEXT: sarq $63, %rbx -; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $53, %r14 -; AVX1-NEXT: sarq $63, %r14 -; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $52, %r15 -; AVX1-NEXT: sarq $63, %r15 -; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $51, %r12 -; AVX1-NEXT: sarq $63, %r12 -; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $50, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX1-NEXT: shlq $49, %r8 -; AVX1-NEXT: sarq $63, %r8 -; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1 -; AVX1-NEXT: shrq $15, %rdx -; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 -; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx -; AVX1-NEXT: movq %rdx, %rcx -; AVX1-NEXT: shlq $47, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: movq %rdx, %r13 -; AVX1-NEXT: movq %rdx, %rcx -; AVX1-NEXT: movq %rdx, %r9 -; AVX1-NEXT: movq %rdx, %r12 -; AVX1-NEXT: movq %rdx, %rdi -; AVX1-NEXT: movq %rdx, %rbx -; AVX1-NEXT: movq %rdx, %r8 -; AVX1-NEXT: movq %rdx, %r10 -; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: movq %rdx, %rsi -; AVX1-NEXT: movq %rdx, %r11 -; AVX1-NEXT: movq %rdx, %r14 -; AVX1-NEXT: movq %rdx, %r15 -; AVX1-NEXT: movq %rdx, %rax -; AVX1-NEXT: shlq $46, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: shlq $45, %r13 -; AVX1-NEXT: sarq $63, %r13 -; AVX1-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r13 -; AVX1-NEXT: shlq $44, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %rcx -; AVX1-NEXT: shlq $43, %r9 -; 
AVX1-NEXT: sarq $63, %r9 -; AVX1-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r9 -; AVX1-NEXT: shlq $42, %r12 -; AVX1-NEXT: sarq $63, %r12 -; AVX1-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r12 -; AVX1-NEXT: shlq $41, %rdi -; AVX1-NEXT: sarq $63, %rdi -; AVX1-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %rdi -; AVX1-NEXT: shlq $40, %rbx -; AVX1-NEXT: sarq $63, %rbx -; AVX1-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %rbx -; AVX1-NEXT: shlq $39, %r8 -; AVX1-NEXT: sarq $63, %r8 -; AVX1-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r8 -; AVX1-NEXT: shlq $38, %r10 -; AVX1-NEXT: sarq $63, %r10 -; AVX1-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2 -; AVX1-NEXT: movsbq %dl, %rax -; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX1-NEXT: shlq $37, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r10 -; AVX1-NEXT: shlq $36, %rsi -; AVX1-NEXT: sarq $63, %rsi -; AVX1-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %rsi -; AVX1-NEXT: shlq $35, %r11 -; AVX1-NEXT: sarq $63, %r11 -; AVX1-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r11 -; AVX1-NEXT: shlq $34, %r14 -; AVX1-NEXT: sarq $63, %r14 -; AVX1-NEXT: vpinsrb $13, %r14d, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r14 -; AVX1-NEXT: shlq $33, %r15 -; AVX1-NEXT: sarq $63, %r15 -; AVX1-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %r15 -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX1-NEXT: shrq $31, %rax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 -; AVX1-NEXT: movq %rdx, %rax -; AVX1-NEXT: shlq $63, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vmovd %ecx, %xmm3 -; AVX1-NEXT: movq %rdx, %rcx -; AVX1-NEXT: movswq %dx, %rdx -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: shlq $62, %r13 -; AVX1-NEXT: sarq $63, %r13 -; AVX1-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1 -; AVX1-NEXT: shlq $61, %r9 -; AVX1-NEXT: sarq $63, %r9 -; AVX1-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $60, %r12 -; AVX1-NEXT: sarq $63, %r12 -; AVX1-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $59, %rdi -; AVX1-NEXT: sarq $63, %rdi -; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1 -; AVX1-NEXT: shlq $58, %rbx -; AVX1-NEXT: sarq $63, %rbx -; AVX1-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1 -; AVX1-NEXT: shlq $57, %r8 -; AVX1-NEXT: sarq $63, %r8 -; AVX1-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1 -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; AVX1-NEXT: shrq $7, %rdi -; AVX1-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1 -; AVX1-NEXT: shlq $55, %r10 -; AVX1-NEXT: sarq $63, %r10 -; AVX1-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $54, %rsi -; AVX1-NEXT: sarq $63, %rsi -; AVX1-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1 -; AVX1-NEXT: shlq $53, %r11 -; AVX1-NEXT: sarq $63, %r11 -; AVX1-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $52, %r14 -; AVX1-NEXT: sarq $63, %r14 -; AVX1-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $51, %r15 -; AVX1-NEXT: sarq $63, %r15 -; AVX1-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1 -; AVX1-NEXT: shlq $50, %rax -; AVX1-NEXT: sarq $63, %rax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX1-NEXT: shlq $49, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shrq $15, %rdx -; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 +; AVX1-NEXT: vmovq %rdi, %xmm0 +; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = 
xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[2,2,3,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm4 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [49376,32928,16480,32,49376,32928,16480,32] +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpsllw $2, %xmm0, %xmm5 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm6 +; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm0, %xmm0 +; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm7 +; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm7, %xmm7, %xmm7 +; AVX1-NEXT: vpcmpgtb %xmm0, %xmm7, %xmm0 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[0,0,1,1,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; AVX1-NEXT: vpsllw $4, %xmm2, %xmm4 +; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4 +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4 +; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4 +; AVX1-NEXT: vpblendvb %xmm6, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpgtb %xmm2, %xmm7, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,6,6,7,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vpsllw $4, %xmm2, %xmm4 +; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4 +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4 +; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4 +; AVX1-NEXT: vpblendvb %xmm6, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpgtb %xmm2, %xmm7, %xmm2 +; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,5] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] +; AVX1-NEXT: vpsllw $4, %xmm1, %xmm4 +; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4 +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3 +; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtb %xmm1, %xmm7, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: leaq -40(%rbp), %rsp -; AVX1-NEXT: popq %rbx -; AVX1-NEXT: popq %r12 -; AVX1-NEXT: popq %r13 -; AVX1-NEXT: popq %r14 -; AVX1-NEXT: popq %r15 -; AVX1-NEXT: popq %rbp ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i64_64i8: ; AVX2: # BB#0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .Lcfi28: -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: .Lcfi29: -; AVX2-NEXT: .cfi_offset %rbp, -16 -; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: .Lcfi30: -; AVX2-NEXT: .cfi_def_cfa_register %rbp -; AVX2-NEXT: pushq %r15 -; AVX2-NEXT: pushq %r14 -; AVX2-NEXT: pushq %r13 -; AVX2-NEXT: pushq %r12 -; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: andq $-32, %rsp -; AVX2-NEXT: subq $128, %rsp -; AVX2-NEXT: .Lcfi31: -; AVX2-NEXT: .cfi_offset %rbx, -56 -; AVX2-NEXT: .Lcfi32: -; AVX2-NEXT: .cfi_offset %r12, -48 -; AVX2-NEXT: .Lcfi33: -; AVX2-NEXT: .cfi_offset %r13, -40 -; AVX2-NEXT: .Lcfi34: -; AVX2-NEXT: .cfi_offset %r14, -32 -; AVX2-NEXT: .Lcfi35: -; AVX2-NEXT: .cfi_offset %r15, -24 -; AVX2-NEXT: movl 
%edi, {{[0-9]+}}(%rsp) -; AVX2-NEXT: shrq $32, %rdi -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) -; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx -; AVX2-NEXT: movq %rdx, %rcx -; AVX2-NEXT: shlq $47, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vmovd %ecx, %xmm0 -; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: movq %rdx, %r8 -; AVX2-NEXT: movq %rdx, %rcx -; AVX2-NEXT: movq %rdx, %rdi -; AVX2-NEXT: movq %rdx, %r13 -; AVX2-NEXT: movq %rdx, %rsi -; AVX2-NEXT: movq %rdx, %r10 -; AVX2-NEXT: movq %rdx, %r11 -; AVX2-NEXT: movq %rdx, %r9 -; AVX2-NEXT: movq %rdx, %rbx -; AVX2-NEXT: movq %rdx, %r14 -; AVX2-NEXT: movq %rdx, %r15 -; AVX2-NEXT: movq %rdx, %r12 -; AVX2-NEXT: movq %rdx, %rax -; AVX2-NEXT: shlq $46, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX2-NEXT: shlq $45, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: shlq $44, %r8 -; AVX2-NEXT: sarq $63, %r8 -; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %r8 -; AVX2-NEXT: shlq $43, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %rcx -; AVX2-NEXT: shlq $42, %rdi -; AVX2-NEXT: sarq $63, %rdi -; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %rdi -; AVX2-NEXT: shlq $41, %r13 -; AVX2-NEXT: sarq $63, %r13 -; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %r13 -; AVX2-NEXT: shlq $40, %rsi -; AVX2-NEXT: sarq $63, %rsi -; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %rsi -; AVX2-NEXT: shlq $39, %r10 -; AVX2-NEXT: sarq $63, %r10 -; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %r10 -; AVX2-NEXT: shlq $38, %r11 -; AVX2-NEXT: sarq $63, %r11 -; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0 -; AVX2-NEXT: movsbq %dl, %rax -; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: shlq $37, %r9 -; AVX2-NEXT: sarq $63, %r9 -; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %r9 -; AVX2-NEXT: shlq $36, %rbx -; AVX2-NEXT: sarq $63, %rbx -; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %rbx -; AVX2-NEXT: shlq $35, %r14 -; AVX2-NEXT: sarq $63, %r14 -; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %r14 -; AVX2-NEXT: shlq $34, %r15 -; AVX2-NEXT: sarq $63, %r15 -; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %r15 -; AVX2-NEXT: shlq $33, %r12 -; AVX2-NEXT: sarq $63, %r12 -; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %r12 -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX2-NEXT: shrq $31, %rax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movq %rdx, %rax -; AVX2-NEXT: shlq $63, %r8 -; AVX2-NEXT: sarq $63, %r8 -; AVX2-NEXT: vmovd %r8d, %xmm1 -; AVX2-NEXT: movq %rdx, %r8 -; AVX2-NEXT: movswq %dx, %rdx -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload -; AVX2-NEXT: shlq $62, %r11 -; AVX2-NEXT: sarq $63, %r11 -; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $61, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $60, %rdi -; AVX2-NEXT: sarq $63, %rdi -; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1 -; AVX2-NEXT: shlq $59, %r13 -; AVX2-NEXT: sarq $63, %r13 -; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $58, %rsi -; 
AVX2-NEXT: sarq $63, %rsi -; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1 -; AVX2-NEXT: shlq $57, %r10 -; AVX2-NEXT: sarq $63, %r10 -; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1 -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; AVX2-NEXT: shrq $7, %rcx -; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $55, %r9 -; AVX2-NEXT: sarq $63, %r9 -; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $54, %rbx -; AVX2-NEXT: sarq $63, %rbx -; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $53, %r14 -; AVX2-NEXT: sarq $63, %r14 -; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $52, %r15 -; AVX2-NEXT: sarq $63, %r15 -; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $51, %r12 -; AVX2-NEXT: sarq $63, %r12 -; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $50, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX2-NEXT: shlq $49, %r8 -; AVX2-NEXT: sarq $63, %r8 -; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1 -; AVX2-NEXT: shrq $15, %rdx -; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 -; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx -; AVX2-NEXT: movq %rdx, %rcx -; AVX2-NEXT: shlq $47, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vmovd %ecx, %xmm2 -; AVX2-NEXT: movq %rdx, %r13 -; AVX2-NEXT: movq %rdx, %rcx -; AVX2-NEXT: movq %rdx, %r9 -; AVX2-NEXT: movq %rdx, %r12 -; AVX2-NEXT: movq %rdx, %rdi -; AVX2-NEXT: movq %rdx, %rbx -; AVX2-NEXT: movq %rdx, %r8 -; AVX2-NEXT: movq %rdx, %r10 -; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: movq %rdx, %rsi -; AVX2-NEXT: movq %rdx, %r11 -; AVX2-NEXT: movq %rdx, %r14 -; AVX2-NEXT: movq %rdx, %r15 -; AVX2-NEXT: movq %rdx, %rax -; AVX2-NEXT: shlq $46, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: shlq $45, %r13 -; AVX2-NEXT: sarq $63, %r13 -; AVX2-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %r13 -; AVX2-NEXT: shlq $44, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %rcx -; AVX2-NEXT: shlq $43, %r9 -; AVX2-NEXT: sarq $63, %r9 -; AVX2-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %r9 -; AVX2-NEXT: shlq $42, %r12 -; AVX2-NEXT: sarq $63, %r12 -; AVX2-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %r12 -; AVX2-NEXT: shlq $41, %rdi -; AVX2-NEXT: sarq $63, %rdi -; AVX2-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %rdi -; AVX2-NEXT: shlq $40, %rbx -; AVX2-NEXT: sarq $63, %rbx -; AVX2-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %rbx -; AVX2-NEXT: shlq $39, %r8 -; AVX2-NEXT: sarq $63, %r8 -; AVX2-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %r8 -; AVX2-NEXT: shlq $38, %r10 -; AVX2-NEXT: sarq $63, %r10 -; AVX2-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2 -; AVX2-NEXT: movsbq %dl, %rax -; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX2-NEXT: shlq $37, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %r10 -; AVX2-NEXT: shlq $36, %rsi -; AVX2-NEXT: sarq $63, %rsi -; AVX2-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %rsi -; AVX2-NEXT: shlq $35, %r11 -; AVX2-NEXT: sarq $63, %r11 -; AVX2-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %r11 -; AVX2-NEXT: shlq $34, %r14 -; AVX2-NEXT: sarq $63, %r14 -; AVX2-NEXT: vpinsrb $13, %r14d, %xmm2, 
%xmm2 -; AVX2-NEXT: movq %rdx, %r14 -; AVX2-NEXT: shlq $33, %r15 -; AVX2-NEXT: sarq $63, %r15 -; AVX2-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %r15 -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX2-NEXT: shrq $31, %rax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 -; AVX2-NEXT: movq %rdx, %rax -; AVX2-NEXT: shlq $63, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vmovd %ecx, %xmm3 -; AVX2-NEXT: movq %rdx, %rcx -; AVX2-NEXT: movswq %dx, %rdx -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX2-NEXT: shlq $62, %r13 -; AVX2-NEXT: sarq $63, %r13 -; AVX2-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1 -; AVX2-NEXT: shlq $61, %r9 -; AVX2-NEXT: sarq $63, %r9 -; AVX2-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $60, %r12 -; AVX2-NEXT: sarq $63, %r12 -; AVX2-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $59, %rdi -; AVX2-NEXT: sarq $63, %rdi -; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1 -; AVX2-NEXT: shlq $58, %rbx -; AVX2-NEXT: sarq $63, %rbx -; AVX2-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1 -; AVX2-NEXT: shlq $57, %r8 -; AVX2-NEXT: sarq $63, %r8 -; AVX2-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1 -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; AVX2-NEXT: shrq $7, %rdi -; AVX2-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1 -; AVX2-NEXT: shlq $55, %r10 -; AVX2-NEXT: sarq $63, %r10 -; AVX2-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $54, %rsi -; AVX2-NEXT: sarq $63, %rsi -; AVX2-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1 -; AVX2-NEXT: shlq $53, %r11 -; AVX2-NEXT: sarq $63, %r11 -; AVX2-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $52, %r14 -; AVX2-NEXT: sarq $63, %r14 -; AVX2-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $51, %r15 -; AVX2-NEXT: sarq $63, %r15 -; AVX2-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1 -; AVX2-NEXT: shlq $50, %rax -; AVX2-NEXT: sarq $63, %rax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX2-NEXT: shlq $49, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shrq $15, %rdx -; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX2-NEXT: leaq -40(%rbp), %rsp -; AVX2-NEXT: popq %rbx -; AVX2-NEXT: popq %r12 -; AVX2-NEXT: popq %r13 -; AVX2-NEXT: popq %r14 -; AVX2-NEXT: popq %r15 -; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vmovq %rdi, %xmm0 +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX2-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX2-NEXT: vpand %ymm8, %ymm2, %ymm2 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32] +; AVX2-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpsllw $2, %ymm0, %ymm2 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX2-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX2-NEXT: vpaddb %ymm4, %ymm4, %ymm6 +; AVX2-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX2-NEXT: vpaddb %ymm6, %ymm6, %ymm7 +; 
AVX2-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,5,5] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] +; AVX2-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1 +; AVX2-NEXT: vpsllw $4, %ymm1, %ymm3 +; AVX2-NEXT: vpand %ymm8, %ymm3, %ymm3 +; AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vpsllw $2, %ymm1, %ymm3 +; AVX2-NEXT: vpand %ymm5, %ymm3, %ymm3 +; AVX2-NEXT: vpblendvb %ymm6, %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm3 +; AVX2-NEXT: vpblendvb %ymm7, %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vpcmpgtb %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i64_64i8: Index: test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll =================================================================== --- test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll +++ test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll @@ -12,31 +12,35 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) { ; SSE2-SSSE3-LABEL: ext_i2_2i64: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: andb $3, %dil -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 -; SSE2-SSSE3-NEXT: shrl %eax -; SSE2-SSSE3-NEXT: andl $1, %eax -; SSE2-SSSE3-NEXT: movq %rax, %xmm1 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: # kill: %EDI %EDI %RDI +; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: psllq $62, %xmm0 +; SSE2-SSSE3-NEXT: psllq $63, %xmm1 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; SSE2-SSSE3-NEXT: psrlq $63, %xmm0 ; SSE2-SSSE3-NEXT: retq ; -; AVX12-LABEL: ext_i2_2i64: -; AVX12: # BB#0: -; AVX12-NEXT: andb $3, %dil -; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vmovq %rcx, %xmm0 -; AVX12-NEXT: shrl %eax -; AVX12-NEXT: andl $1, %eax -; AVX12-NEXT: vmovq %rax, %xmm1 -; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX12-NEXT: retq +; AVX1-LABEL: ext_i2_2i64: +; AVX1: # BB#0: +; AVX1-NEXT: # kill: %EDI %EDI %RDI +; AVX1-NEXT: vmovq %rdi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] +; AVX1-NEXT: vpsllq $62, %xmm0, %xmm1 +; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i2_2i64: +; AVX2: # BB#0: +; AVX2-NEXT: # kill: %EDI %EDI %RDI +; AVX2-NEXT: vmovq %rdi, %xmm0 +; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpsrlq $63, %xmm0, %xmm0 +; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i2_2i64: ; AVX512: # BB#0: @@ -56,57 +60,32 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) { ; SSE2-SSSE3-LABEL: ext_i4_4i32: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: andb $15, %dil -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; 
SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-SSSE3-NEXT: movd %eax, %xmm0 -; SSE2-SSSE3-NEXT: shrl %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm2 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,1073741824,536870912,268435456] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: movd %edi, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-SSSE3-NEXT: psrld $31, %xmm0 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i4_4i32: ; AVX1: # BB#0: -; AVX1-NEXT: andb $15, %dil -; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl %ecx -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: shrl $3, %eax -; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 -; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i4_4i32: ; AVX2: # BB#0: -; AVX2-NEXT: andb $15, %dil -; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl %ecx -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: shrl $3, %eax -; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 -; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] -; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0 +; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i4_4i32: @@ -127,82 +106,29 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) { ; SSE2-SSSE3-LABEL: ext_i8_8i16: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, 
%ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: shrl $7, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm3 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE2-SSSE3-NEXT: movd %edi, %xmm0 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-SSSE3-NEXT: pmullw {{.*}}(%rip), %xmm0 +; SSE2-SSSE3-NEXT: psrlw $15, %xmm0 ; SSE2-SSSE3-NEXT: retq ; -; AVX12-LABEL: ext_i8_8i16: -; AVX12: # BB#0: -; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: movl %eax, %edx -; AVX12-NEXT: andl $1, %edx -; AVX12-NEXT: vmovd %edx, %xmm0 -; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $2, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $3, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $4, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $5, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $6, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: shrl $7, %eax -; AVX12-NEXT: movzwl %ax, %eax -; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 -; AVX12-NEXT: retq +; AVX1-LABEL: ext_i8_8i16: +; AVX1: # BB#0: +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i8_8i16: +; AVX2: # BB#0: +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0 +; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i8_8i16: ; AVX512: # BB#0: @@ -254,155 +180,93 @@ } define <16 x i8> @ext_i16_16i8(i16 %a0) { -; SSE2-SSSE3-LABEL: ext_i16_16i8: -; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; 
SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = 
xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE2-SSSE3-NEXT: retq +; SSE2-LABEL: ext_i16_16i8: +; SSE2: # BB#0: +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32] +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm3 +; SSE2-NEXT: movd %edi, %xmm0 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psllw $4, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psllw $2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm2, %xmm2 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: paddb %xmm0, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: psrlw $7, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: ext_i16_16i8: +; SSSE3: # BB#0: +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32] +; SSSE3-NEXT: pxor %xmm1, %xmm1 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtb %xmm2, %xmm3 +; SSSE3-NEXT: movd %edi, %xmm0 +; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1] +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pandn %xmm0, %xmm4 +; SSSE3-NEXT: psllw $4, %xmm0 +; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0 +; SSSE3-NEXT: pand %xmm3, %xmm0 +; SSSE3-NEXT: por %xmm4, %xmm0 +; SSSE3-NEXT: paddb %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtb %xmm2, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pandn %xmm0, %xmm4 +; SSSE3-NEXT: psllw $2, %xmm0 +; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0 +; SSSE3-NEXT: pand %xmm3, %xmm0 +; SSSE3-NEXT: por %xmm4, %xmm0 +; SSSE3-NEXT: paddb %xmm2, %xmm2 +; SSSE3-NEXT: pcmpgtb %xmm2, %xmm1 +; SSSE3-NEXT: movdqa %xmm1, %xmm2 +; SSSE3-NEXT: pandn %xmm0, %xmm2 +; SSSE3-NEXT: paddb %xmm0, %xmm0 +; SSSE3-NEXT: pand %xmm1, %xmm0 +; SSSE3-NEXT: por %xmm2, %xmm0 +; SSSE3-NEXT: psrlw $7, %xmm0 +; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0 +; SSSE3-NEXT: retq ; ; AVX12-LABEL: ext_i16_16i8: ; AVX12: # BB#0: -; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: movl %eax, %edx -; AVX12-NEXT: andl $1, %edx -; AVX12-NEXT: vmovd %edx, %xmm0 -; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $2, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $3, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $4, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $5, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: 
vpinsrb $5, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $6, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $7, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $8, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $9, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $10, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $11, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $12, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $13, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: movl %eax, %ecx -; AVX12-NEXT: shrl $14, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 -; AVX12-NEXT: shrl $15, %eax -; AVX12-NEXT: movzwl %ax, %eax -; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX12-NEXT: vmovd %edi, %xmm0 +; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1] +; AVX12-NEXT: vpsllw $4, %xmm0, %xmm1 +; AVX12-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX12-NEXT: vmovdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32] +; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpsllw $2, %xmm0, %xmm1 +; AVX12-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX12-NEXT: vpaddb %xmm2, %xmm2, %xmm2 +; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpaddb %xmm0, %xmm0, %xmm1 +; AVX12-NEXT: vpaddb %xmm2, %xmm2, %xmm2 +; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX12-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX12-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; AVX12-NEXT: retq ; ; AVX512-LABEL: ext_i16_16i8: @@ -521,75 +385,45 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) { ; SSE2-SSSE3-LABEL: ext_i4_4i64: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: andb $15, %dil -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-SSSE3-NEXT: movd %eax, %xmm2 -; SSE2-SSSE3-NEXT: shrl %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm0 -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3] -; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,1] -; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3] -; SSE2-SSSE3-NEXT: pand %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: # kill: %EDI %EDI %RDI +; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] +; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: psllq $62, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: psllq $63, %xmm1 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm0 
= xmm1[0],xmm0[1] +; SSE2-SSSE3-NEXT: psrlq $63, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: psllq $60, %xmm1 +; SSE2-SSSE3-NEXT: psllq $61, %xmm2 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; SSE2-SSSE3-NEXT: psrlq $63, %xmm1 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i4_4i64: ; AVX1: # BB#0: -; AVX1-NEXT: andb $15, %dil -; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $3, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vmovq %rcx, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vmovq %rcx, %xmm1 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vmovq %rcx, %xmm1 -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vmovq %rax, %xmm2 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: # kill: %EDI %EDI %RDI +; AVX1-NEXT: vmovq %rdi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] +; AVX1-NEXT: vpsllq $62, %xmm0, %xmm1 +; AVX1-NEXT: vpsllq $63, %xmm0, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpsrlq $63, %xmm1, %xmm1 +; AVX1-NEXT: vpsllq $60, %xmm0, %xmm2 +; AVX1-NEXT: vpsllq $61, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i4_4i64: ; AVX2: # BB#0: -; AVX2-NEXT: andb $15, %dil -; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $3, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vmovq %rcx, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vmovq %rcx, %xmm1 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vmovq %rcx, %xmm1 -; AVX2-NEXT: shrl %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vmovq %rax, %xmm2 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: # kill: %EDI %EDI %RDI +; AVX2-NEXT: vmovq %rdi, %xmm0 +; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0 +; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i4_4i64: @@ -609,110 +443,43 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) { ; SSE2-SSSE3-LABEL: ext_i8_8i32: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = 
xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: shrl $7, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm3 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] -; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] -; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,1073741824,536870912,268435456] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: movd %edi, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-SSSE3-NEXT: psrld $31, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [134217728,67108864,33554432,16777216] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm3 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: psrld $31, %xmm1 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i8_8i32: ; AVX1: # BB#0: -; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $5, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: shrl $4, %edx -; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $6, %ecx -; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $7, %ecx -; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl %ecx -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shrl $3, %eax -; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpsrld $31, %xmm1, %xmm1 +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i8_8i32: ; AVX2: # BB#0: -; 
AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $5, %ecx -; AVX2-NEXT: movl %eax, %edx -; AVX2-NEXT: shrl $4, %edx -; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $6, %ecx -; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $7, %ecx -; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl %ecx -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shrl $3, %eax -; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] -; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0 +; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i8_8i32: @@ -728,229 +495,34 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) { ; SSE2-SSSE3-LABEL: ext_i16_16i16: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl 
$1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1] -; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: movd %edi, %xmm0 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [32768,16384,8192,4096,2048,1024,512,256] +; SSE2-SSSE3-NEXT: pmullw %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: psrlw $15, %xmm0 +; SSE2-SSSE3-NEXT: pmullw {{.*}}(%rip), %xmm1 +; SSE2-SSSE3-NEXT: psrlw $15, %xmm1 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i16_16i16: ; AVX1: # BB#0: -; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $9, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: shrl $8, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $10, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $11, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: 
vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $12, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $13, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $14, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $15, %ecx -; AVX1-NEXT: movzwl %cx, %ecx -; AVX1-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: vmovd %edx, %xmm1 -; AVX1-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $3, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $4, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $5, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $6, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 -; AVX1-NEXT: shrl $7, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpsrlw $15, %xmm1, %xmm1 +; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i16_16i16: ; AVX2: # BB#0: -; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $9, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: movl %eax, %edx -; AVX2-NEXT: shrl $8, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $10, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $11, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $12, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $13, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $14, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $15, %ecx -; AVX2-NEXT: movzwl %cx, %ecx -; AVX2-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: movl %eax, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: vmovd %edx, %xmm1 -; AVX2-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $3, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: 
vpinsrw $3, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $4, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $5, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $6, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 -; AVX2-NEXT: shrl $7, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0 +; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i16_16i16: @@ -966,449 +538,119 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) { ; SSE2-SSSE3-LABEL: ext_i32_32i8: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: shrl $16, %edi -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl 
$1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm5 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [49376,32928,16480,32,49376,32928,16480,32] +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSE2-SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm7, %xmm3 +; SSE2-SSSE3-NEXT: movd %edi, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSE2-SSSE3-NEXT: pandn %xmm0, %xmm4 +; SSE2-SSSE3-NEXT: psllw $4, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm8 +; SSE2-SSSE3-NEXT: pand %xmm8, %xmm0 +; SSE2-SSSE3-NEXT: por %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: paddb %xmm7, %xmm7 +; SSE2-SSSE3-NEXT: pxor %xmm5, %xmm5 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm7, %xmm5 +; SSE2-SSSE3-NEXT: movdqa %xmm5, %xmm4 +; SSE2-SSSE3-NEXT: pandn %xmm0, %xmm4 +; 
SSE2-SSSE3-NEXT: psllw $2, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-SSSE3-NEXT: pand %xmm5, %xmm6 +; SSE2-SSSE3-NEXT: pand %xmm6, %xmm0 +; SSE2-SSSE3-NEXT: por %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: paddb %xmm7, %xmm7 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm7, %xmm2 +; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm4 +; SSE2-SSSE3-NEXT: pandn %xmm0, %xmm4 +; SSE2-SSSE3-NEXT: paddb %xmm0, %xmm0 +; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: por %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: psrlw $7, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,3,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm3 +; SSE2-SSSE3-NEXT: psllw $4, %xmm1 +; SSE2-SSSE3-NEXT: pand %xmm8, %xmm1 +; SSE2-SSSE3-NEXT: por %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm5 +; SSE2-SSSE3-NEXT: psllw $2, %xmm1 +; SSE2-SSSE3-NEXT: pand %xmm6, %xmm1 +; SSE2-SSSE3-NEXT: por %xmm5, %xmm1 +; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm3 +; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm3 +; SSE2-SSSE3-NEXT: paddb %xmm1, %xmm1 +; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: por %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: psrlw $7, %xmm1 +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i32_32i8: ; AVX1: # BB#0: -; AVX1-NEXT: pushq %rbp -; AVX1-NEXT: .Lcfi0: -; AVX1-NEXT: .cfi_def_cfa_offset 16 -; AVX1-NEXT: .Lcfi1: -; AVX1-NEXT: .cfi_offset %rbp, -16 -; AVX1-NEXT: movq %rsp, %rbp -; AVX1-NEXT: .Lcfi2: -; AVX1-NEXT: .cfi_def_cfa_register %rbp -; AVX1-NEXT: andq $-32, %rsp -; AVX1-NEXT: subq $32, %rsp -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $17, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movl %edi, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm0 -; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $18, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $19, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $20, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $21, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $22, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $23, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $24, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $25, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $26, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $27, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $28, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $29, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $13, 
%eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $30, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: movl %edi, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm1 -; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $3, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $4, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $5, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $6, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $7, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $9, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $10, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $11, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $12, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $13, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: shrl $14, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX1-NEXT: shrl $15, %edi -; AVX1-NEXT: andl $1, %edi -; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: movq %rbp, %rsp -; AVX1-NEXT: popq %rbp +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[2,2,3,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [49376,32928,16480,32,49376,32928,16480,32] +; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2 +; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7 +; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7] +; 
AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsllw $2, %xmm0, %xmm3 +; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i32_32i8: ; AVX2: # BB#0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .Lcfi0: -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: .Lcfi1: -; AVX2-NEXT: .cfi_offset %rbp, -16 -; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: .Lcfi2: -; AVX2-NEXT: .cfi_def_cfa_register %rbp -; AVX2-NEXT: andq $-32, %rsp -; AVX2-NEXT: subq $32, %rsp -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $17, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movl %edi, %ecx -; AVX2-NEXT: shrl $16, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm0 -; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $18, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $19, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $20, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $21, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $22, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $23, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $24, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $25, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $26, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $27, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $28, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $29, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $30, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $31, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: movl %edi, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm1 -; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $2, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $3, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $4, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $4, 
%eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $5, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $6, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $7, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $9, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $10, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $11, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $12, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $13, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: shrl $14, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX2-NEXT: shrl $15, %edi -; AVX2-NEXT: andl $1, %edi -; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1 +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX2-NEXT: movq %rbp, %rsp -; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1 +; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32] +; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsllw $2, %ymm0, %ymm1 +; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1 +; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i32_32i8: @@ -1428,148 +670,68 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) { ; SSE2-SSSE3-LABEL: ext_i8_8i64: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] -; 
SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: shrl $7, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3] -; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7] -; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3] -; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3] -; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3] -; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: # kill: %EDI %EDI %RDI +; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1] +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: psllq $62, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: psllq $63, %xmm1 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; SSE2-SSSE3-NEXT: psrlq $63, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: psllq $60, %xmm1 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: psllq $61, %xmm2 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; SSE2-SSSE3-NEXT: psrlq $63, %xmm1 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: psllq $58, %xmm2 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: psllq $59, %xmm3 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; SSE2-SSSE3-NEXT: psrlq $63, %xmm2 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: psllq $56, %xmm3 +; SSE2-SSSE3-NEXT: psllq $57, %xmm4 +; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1] +; SSE2-SSSE3-NEXT: psrlq $63, %xmm3 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i8_8i64: ; AVX1: # BB#0: -; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $3, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $4, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; 
AVX1-NEXT: shrl $5, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $6, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: shrl $7, %eax -; AVX1-NEXT: movzwl %ax, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1 -; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 -; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1] -; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 -; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX1-NEXT: # kill: %EDI %EDI %RDI +; AVX1-NEXT: vmovq %rdi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1] +; AVX1-NEXT: vpsllq $62, %xmm1, %xmm0 +; AVX1-NEXT: vpsllq $63, %xmm1, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7] +; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0 +; AVX1-NEXT: vpsllq $60, %xmm1, %xmm2 +; AVX1-NEXT: vpsllq $61, %xmm1, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpsrlq $63, %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpsllq $58, %xmm1, %xmm2 +; AVX1-NEXT: vpsllq $59, %xmm1, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpsrlq $63, %xmm2, %xmm2 +; AVX1-NEXT: vpsllq $56, %xmm1, %xmm3 +; AVX1-NEXT: vpsllq $57, %xmm1, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpsrlq $63, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i8_8i64: ; AVX2: # BB#0: -; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: movl %eax, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $3, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $4, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $5, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $6, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: shrl $7, %eax -; AVX2-NEXT: movzwl %ax, %eax -; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1 -; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1] -; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero 
-; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: # kill: %EDI %EDI %RDI +; AVX2-NEXT: vmovq %rdi, %xmm0 +; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1 +; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm1, %ymm0 +; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0 +; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vpsrlq $63, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i8_8i64: @@ -1585,253 +747,66 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) { ; SSE2-SSSE3-LABEL: ext_i16_16i32: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] -; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1 -; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] -; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2 -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2 -; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,1073741824,536870912,268435456] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: movd %edi, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0] +; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-SSSE3-NEXT: psrld $31, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [134217728,67108864,33554432,16777216] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: psrld $31, %xmm1 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [8388608,4194304,2097152,1048576] +; SSE2-SSSE3-NEXT: 
pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm4 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSE2-SSSE3-NEXT: psrld $31, %xmm2 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [524288,262144,131072,65536] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm3, %xmm5 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3] +; SSE2-SSSE3-NEXT: pmuludq %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] +; SSE2-SSSE3-NEXT: psrld $31, %xmm3 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i16_16i32: ; AVX1: # BB#0: -; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $3, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $4, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $5, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $6, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $7, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $9, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $10, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $11, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $12, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $13, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $14, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: shrl $15, %eax -; AVX1-NEXT: movzwl %ax, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4,4,5,5,6,6,7,7] -; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm2 +; AVX1-NEXT: vpsrld $31, %xmm2, %xmm2 ; AVX1-NEXT: 
vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] -; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm2 +; AVX1-NEXT: vpsrld $31, %xmm2, %xmm2 +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $31, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i16_16i32: ; AVX2: # BB#0: -; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: movl %eax, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $3, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $4, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $5, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $6, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $7, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $9, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $10, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $11, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $12, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $13, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $14, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: shrl $15, %eax -; AVX2-NEXT: movzwl %ax, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1 -; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] -; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; 
AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: vpbroadcastd %xmm0, %ymm1 +; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm0 +; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0 +; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vpsrld $31, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i16_16i32: @@ -1847,549 +822,57 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) { ; SSE2-SSSE3-LABEL: ext_i32_32i16: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movl %edi, %eax -; SSE2-SSSE3-NEXT: shrl $16, %eax -; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; 
SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm5 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-SSSE3-NEXT: movd %edi, %xmm3 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,0,0,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [32768,16384,8192,4096,2048,1024,512,256] ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1 -; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2 -; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: pmullw %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: psrlw $15, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1] +; SSE2-SSSE3-NEXT: pmullw %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: psrlw $15, %xmm1 +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm3 = 
xmm3[1,1,1,1,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE2-SSSE3-NEXT: pmullw %xmm3, %xmm2 +; SSE2-SSSE3-NEXT: psrlw $15, %xmm2 +; SSE2-SSSE3-NEXT: pmullw %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: psrlw $15, %xmm3 ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: ext_i32_32i16: ; AVX1: # BB#0: -; AVX1-NEXT: pushq %rbp -; AVX1-NEXT: .Lcfi3: -; AVX1-NEXT: .cfi_def_cfa_offset 16 -; AVX1-NEXT: .Lcfi4: -; AVX1-NEXT: .cfi_offset %rbp, -16 -; AVX1-NEXT: movq %rsp, %rbp -; AVX1-NEXT: .Lcfi5: -; AVX1-NEXT: .cfi_def_cfa_register %rbp -; AVX1-NEXT: pushq %r15 -; AVX1-NEXT: pushq %r14 -; AVX1-NEXT: pushq %r13 -; AVX1-NEXT: pushq %r12 -; AVX1-NEXT: pushq %rbx -; AVX1-NEXT: andq $-32, %rsp -; AVX1-NEXT: subq $128, %rsp -; AVX1-NEXT: .Lcfi6: -; AVX1-NEXT: .cfi_offset %rbx, -56 -; AVX1-NEXT: .Lcfi7: -; AVX1-NEXT: .cfi_offset %r12, -48 -; AVX1-NEXT: .Lcfi8: -; AVX1-NEXT: .cfi_offset %r13, -40 -; AVX1-NEXT: .Lcfi9: -; AVX1-NEXT: .cfi_offset %r14, -32 -; AVX1-NEXT: .Lcfi10: -; AVX1-NEXT: .cfi_offset %r15, -24 -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX1-NEXT: movl %edi, %r13d -; AVX1-NEXT: movl %edi, %r12d -; AVX1-NEXT: movl %edi, %r15d -; AVX1-NEXT: movl %edi, %r14d -; AVX1-NEXT: movl %edi, %ebx -; AVX1-NEXT: movl %edi, %r11d -; AVX1-NEXT: movl %edi, %r10d -; AVX1-NEXT: movl %edi, %r9d -; AVX1-NEXT: movl %edi, %r8d -; AVX1-NEXT: movl %edi, %esi -; AVX1-NEXT: movl %edi, %edx -; AVX1-NEXT: movl %edi, %ecx -; AVX1-NEXT: movl %edi, %eax -; AVX1-NEXT: andl $1, %edi -; AVX1-NEXT: vmovd %edi, %xmm0 -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: shrl $3, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0 -; AVX1-NEXT: shrl $4, %esi -; AVX1-NEXT: andl $1, %esi -; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0 -; AVX1-NEXT: shrl $5, %r8d -; AVX1-NEXT: andl $1, %r8d -; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0 -; AVX1-NEXT: shrl $6, %r9d -; AVX1-NEXT: andl $1, %r9d -; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0 -; AVX1-NEXT: shrl $7, %r10d -; AVX1-NEXT: andl $1, %r10d -; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0 -; AVX1-NEXT: shrl $8, %r11d -; AVX1-NEXT: andl $1, %r11d -; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0 -; AVX1-NEXT: shrl $9, %ebx -; AVX1-NEXT: andl $1, %ebx -; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0 -; AVX1-NEXT: shrl $10, %r14d -; AVX1-NEXT: andl $1, %r14d -; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0 -; AVX1-NEXT: shrl $11, %r15d -; 
AVX1-NEXT: andl $1, %r15d -; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0 -; AVX1-NEXT: shrl $12, %r12d -; AVX1-NEXT: andl $1, %r12d -; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0 -; AVX1-NEXT: shrl $13, %r13d -; AVX1-NEXT: andl $1, %r13d -; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $14, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $15, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $16, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $17, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $18, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $19, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $20, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $21, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $22, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $23, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $24, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $25, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $26, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $27, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $28, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $29, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $30, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 -; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 -; AVX1-NEXT: vpmovzxbw 
{{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 -; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 -; AVX1-NEXT: leaq -40(%rbp), %rsp -; AVX1-NEXT: popq %rbx -; AVX1-NEXT: popq %r12 -; AVX1-NEXT: popq %r13 -; AVX1-NEXT: popq %r14 -; AVX1-NEXT: popq %r15 -; AVX1-NEXT: popq %rbp +; AVX1-NEXT: vmovd %edi, %xmm1 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,16384,8192,4096,2048,1024,512,256] +; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlw $15, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [128,64,32,16,8,4,2,1] +; AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm2 +; AVX1-NEXT: vpsrlw $15, %xmm2, %xmm2 +; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $15, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: retq ; ; AVX2-LABEL: ext_i32_32i16: ; AVX2: # BB#0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .Lcfi3: -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: .Lcfi4: -; AVX2-NEXT: .cfi_offset %rbp, -16 -; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: .Lcfi5: -; AVX2-NEXT: .cfi_def_cfa_register %rbp -; AVX2-NEXT: pushq %r15 -; AVX2-NEXT: pushq %r14 -; AVX2-NEXT: pushq %r13 -; AVX2-NEXT: pushq %r12 -; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: andq $-32, %rsp -; AVX2-NEXT: subq $128, %rsp -; AVX2-NEXT: .Lcfi6: -; AVX2-NEXT: .cfi_offset %rbx, -56 -; AVX2-NEXT: .Lcfi7: -; AVX2-NEXT: .cfi_offset %r12, -48 -; AVX2-NEXT: .Lcfi8: -; AVX2-NEXT: .cfi_offset %r13, -40 -; AVX2-NEXT: .Lcfi9: -; AVX2-NEXT: .cfi_offset %r14, -32 -; AVX2-NEXT: .Lcfi10: -; AVX2-NEXT: .cfi_offset %r15, -24 -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill -; AVX2-NEXT: movl %edi, %r13d -; AVX2-NEXT: movl %edi, %r12d -; AVX2-NEXT: movl %edi, %r15d -; AVX2-NEXT: movl %edi, %r14d -; AVX2-NEXT: movl %edi, %ebx -; AVX2-NEXT: movl %edi, %r11d -; AVX2-NEXT: movl %edi, %r10d -; AVX2-NEXT: movl %edi, %r9d -; AVX2-NEXT: movl %edi, %r8d -; AVX2-NEXT: movl %edi, %esi -; AVX2-NEXT: movl %edi, %edx -; AVX2-NEXT: movl %edi, %ecx -; AVX2-NEXT: movl %edi, %eax -; AVX2-NEXT: andl $1, %edi ; AVX2-NEXT: vmovd %edi, 
%xmm0 -; AVX2-NEXT: shrl %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: shrl $3, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0 -; AVX2-NEXT: shrl $4, %esi -; AVX2-NEXT: andl $1, %esi -; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0 -; AVX2-NEXT: shrl $5, %r8d -; AVX2-NEXT: andl $1, %r8d -; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0 -; AVX2-NEXT: shrl $6, %r9d -; AVX2-NEXT: andl $1, %r9d -; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0 -; AVX2-NEXT: shrl $7, %r10d -; AVX2-NEXT: andl $1, %r10d -; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0 -; AVX2-NEXT: shrl $8, %r11d -; AVX2-NEXT: andl $1, %r11d -; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0 -; AVX2-NEXT: shrl $9, %ebx -; AVX2-NEXT: andl $1, %ebx -; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0 -; AVX2-NEXT: shrl $10, %r14d -; AVX2-NEXT: andl $1, %r14d -; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0 -; AVX2-NEXT: shrl $11, %r15d -; AVX2-NEXT: andl $1, %r15d -; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0 -; AVX2-NEXT: shrl $12, %r12d -; AVX2-NEXT: andl $1, %r12d -; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0 -; AVX2-NEXT: shrl $13, %r13d -; AVX2-NEXT: andl $1, %r13d -; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $14, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $15, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $16, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $17, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $18, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $19, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $20, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $21, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $22, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $23, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $24, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $25, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $26, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $27, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $11, 
%eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $28, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $29, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $30, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload -; AVX2-NEXT: shrl $31, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 -; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1 -; AVX2-NEXT: leaq -40(%rbp), %rsp -; AVX2-NEXT: popq %rbx -; AVX2-NEXT: popq %r12 -; AVX2-NEXT: popq %r13 -; AVX2-NEXT: popq %r14 -; AVX2-NEXT: popq %r15 -; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1] +; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0 +; AVX2-NEXT: shrl $16, %edi +; AVX2-NEXT: vmovd %edi, %xmm2 +; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2 +; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpsrlw $15, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: ext_i32_32i16: @@ -2405,867 +888,200 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) { ; SSE2-SSSE3-LABEL: ext_i64_64i8: ; SSE2-SSSE3: # BB#0: -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movq %rdi, %rax -; SSE2-SSSE3-NEXT: shrq $32, %rax -; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movq %rdi, %rax -; SSE2-SSSE3-NEXT: shrq $48, %rax -; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: shrl $16, %edi -; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl 
$3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm5 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; 
SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm6 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] -; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $7, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $6, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $5, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $3, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm6 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $11, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $10, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; 
SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $9, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm6 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $8, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $13, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $12, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm6 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] -; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $14, %ecx -; SSE2-SSSE3-NEXT: andl $1, %ecx -; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 -; SSE2-SSSE3-NEXT: shrl $15, %eax -; SSE2-SSSE3-NEXT: movzwl %ax, %eax -; SSE2-SSSE3-NEXT: movd %eax, %xmm7 -; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7] -; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [49376,32928,16480,32,49376,32928,16480,32] +; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSE2-SSSE3-NEXT: pxor %xmm7, %xmm7 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm7 +; SSE2-SSSE3-NEXT: movq %rdi, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,1,1,4,5,6,7] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-SSSE3-NEXT: movdqa %xmm7, %xmm2 +; SSE2-SSSE3-NEXT: pandn %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: psllw $4, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-SSSE3-NEXT: pand %xmm7, %xmm8 +; SSE2-SSSE3-NEXT: pand %xmm8, %xmm0 +; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: paddb %xmm1, %xmm1 +; SSE2-SSSE3-NEXT: pxor %xmm6, %xmm6 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm6 +; SSE2-SSSE3-NEXT: movdqa %xmm6, %xmm2 +; SSE2-SSSE3-NEXT: pandn %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: psllw $2, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-SSSE3-NEXT: pand %xmm6, %xmm10 +; SSE2-SSSE3-NEXT: pand %xmm10, %xmm0 +; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: paddb %xmm1, %xmm1 +; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm4 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: pandn %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: paddb %xmm0, %xmm0 +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: por %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: psrlw $7, %xmm0 +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; SSE2-SSSE3-NEXT: pand %xmm9, %xmm0 +; 
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,2,3,3,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa %xmm7, %xmm2
+; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: psllw $4, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm8, %xmm1
+; SSE2-SSSE3-NEXT: por %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm6, %xmm2
+; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: psllw $2, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm10, %xmm1
+; SSE2-SSSE3-NEXT: por %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pandn %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: paddb %xmm1, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: por %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm9, %xmm1
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,5,5]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; SSE2-SSSE3-NEXT: movdqa %xmm7, %xmm5
+; SSE2-SSSE3-NEXT: pandn %xmm2, %xmm5
+; SSE2-SSSE3-NEXT: psllw $4, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm8, %xmm2
+; SSE2-SSSE3-NEXT: por %xmm5, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm6, %xmm5
+; SSE2-SSSE3-NEXT: pandn %xmm2, %xmm5
+; SSE2-SSSE3-NEXT: psllw $2, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm10, %xmm2
+; SSE2-SSSE3-NEXT: por %xmm5, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSE2-SSSE3-NEXT: pandn %xmm2, %xmm5
+; SSE2-SSSE3-NEXT: paddb %xmm2, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: por %xmm5, %xmm2
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm9, %xmm2
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; SSE2-SSSE3-NEXT: pandn %xmm3, %xmm7
+; SSE2-SSSE3-NEXT: psllw $4, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm8, %xmm3
+; SSE2-SSSE3-NEXT: por %xmm7, %xmm3
+; SSE2-SSSE3-NEXT: pandn %xmm3, %xmm6
+; SSE2-SSSE3-NEXT: psllw $2, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm10, %xmm3
+; SSE2-SSSE3-NEXT: por %xmm6, %xmm3
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSE2-SSSE3-NEXT: pandn %xmm3, %xmm5
+; SSE2-SSSE3-NEXT: paddb %xmm3, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: por %xmm5, %xmm3
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm9, %xmm3
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: ext_i64_64i8:
 ; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi11:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi12:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi13:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $4, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $8, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $9, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $10, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $11, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $12, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $13, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $49, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movq %rdi, %rcx
-; AVX1-NEXT: shrq $48, %rcx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $50, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $51, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $52, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $53, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $54, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $55, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $56, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $57, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $58, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $59, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $60, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $61, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $62, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $63, %rax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $33, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movq %rdi, %rcx
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $34, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $35, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $36, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $37, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $38, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $39, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $40, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $41, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $42, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $43, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $44, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $45, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $46, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX1-NEXT: shrq $47, %rdi
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [49376,32928,16480,32,49376,32928,16480,32]
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm6
+; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm7
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpsllw $4, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4
+; AVX1-NEXT: vpblendvb %xmm6, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,6,6,7,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX1-NEXT: vpsllw $4, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4
+; AVX1-NEXT: vpblendvb %xmm6, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,5]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT: vpsllw $4, %xmm1, %xmm4
+; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: ext_i64_64i8:
 ; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi11:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi12:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi13:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrl $16, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $2, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $4, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $5, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $6, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $8, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $9, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $10, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $11, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $12, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $13, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $49, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movq %rdi, %rcx
-; AVX2-NEXT: shrq $48, %rcx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $50, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $51, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $52, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $53, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $54, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $55, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $56, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $57, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $58, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $59, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $60, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $61, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $62, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $63, %rax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $33, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movq %rdi, %rcx
-; AVX2-NEXT: shrq $32, %rcx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $34, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $35, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $36, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $37, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $38, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $39, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $40, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $41, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $42, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $43, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $44, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $45, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $46, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NEXT: shrq $47, %rdi
-; AVX2-NEXT: andl $1, %edi
-; AVX2-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX2-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32]
+; AVX2-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX2-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm4, %ymm4, %ymm6
+; AVX2-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX2-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX2-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,5,5]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,7,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsllw $4, %ymm1, %ymm3
+; AVX2-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX2-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX2-NEXT: vpblendvb %ymm6, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX2-NEXT: vpblendvb %ymm7, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $7, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: ext_i64_64i8:
Index: test/CodeGen/X86/bitcast-int-to-vector-bool.ll
===================================================================
--- test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -8,29 +8,35 @@
 define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
 ; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movq %rax, %xmm1
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: movzbl %dil, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psllq $62, %xmm0
+; SSE2-SSSE3-NEXT: psllq $63, %xmm1
+; SSE2-SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
 ; SSE2-SSSE3-NEXT: retq
 ;
-; AVX12-LABEL: bitcast_i2_2i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vmovq %rcx, %xmm0
-; AVX12-NEXT: shrl %eax
-; AVX12-NEXT: andl $1, %eax
-; AVX12-NEXT: vmovq %rax, %xmm1
-; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX12-NEXT: retq
+; AVX1-LABEL: bitcast_i2_2i1:
+; AVX1: # BB#0:
+; AVX1-NEXT: movzbl %dil, %eax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vpsllq $62, %xmm0, %xmm1
+; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i2_2i1:
+; AVX2: # BB#0:
+; AVX2-NEXT: movzbl %dil, %eax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i2_2i1:
 ; AVX512: # BB#0:
@@ -48,54 +54,35 @@
 define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i4_4i1:
 ; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,1073741824,536870912,268435456]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: movzbl %dil, %eax
 ; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-SSSE3-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-SSSE3-NEXT: pmuludq %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-SSSE3-NEXT: psrld $31, %xmm0
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: bitcast_i4_4i1:
 ; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: movzbl %dil, %eax
 ; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: bitcast_i4_4i1:
 ; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: movzbl %dil, %eax
 ; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i4_4i1:
@@ -115,82 +102,29 @@
 define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i8_8i1:
 ; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: pmullw {{.*}}(%rip), %xmm0
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
 ; SSE2-SSSE3-NEXT: retq
 ;
-; AVX12-LABEL: bitcast_i8_8i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $7, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: bitcast_i8_8i1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i8_8i1:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i8_8i1:
 ; AVX512: # BB#0:
@@ -202,155 +136,93 @@
 }
 define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
-; SSE2-SSSE3-LABEL: bitcast_i16_16i1:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: retq
+; SSE2-LABEL: bitcast_i16_16i1:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32]
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: paddb %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
+; SSE2-NEXT: psllw $2, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: paddb %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: paddb %xmm0, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psrlw $7, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: bitcast_i16_16i1:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32]
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: pcmpgtb %xmm2, %xmm3
+; SSSE3-NEXT: movd %edi, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pandn %xmm0, %xmm4
+; SSSE3-NEXT: psllw $4, %xmm0
+; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: por %xmm4, %xmm0
+; SSSE3-NEXT: paddb %xmm2, %xmm2
+; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: pcmpgtb %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pandn %xmm0, %xmm4
+; SSSE3-NEXT: psllw $2, %xmm0
+; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: por %xmm4, %xmm0
+; SSSE3-NEXT: paddb %xmm2, %xmm2
+; SSSE3-NEXT: pcmpgtb %xmm2, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pandn %xmm0, %xmm2
+; SSSE3-NEXT: paddb %xmm0, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: por %xmm2, %xmm0
+; SSSE3-NEXT: psrlw $7, %xmm0
+; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: bitcast_i16_16i1:
 ; AVX12: # BB#0:
-; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $7, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $8, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $9, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $10, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $11, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $12, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $13, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $14, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $15, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX12-NEXT: vmovd %edi, %xmm0
+; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX12-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX12-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX12-NEXT: vmovdqa {{.*#+}} xmm2 = [49376,32928,16480,32,49376,32928,16480,32]
+; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX12-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX12-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX12-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX12-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX12-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX12-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX12-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX12-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i16_16i1:
@@ -371,286 +243,63 @@
 ;
 ; AVX1-LABEL: bitcast_i32_32i1:
 ; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi0:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi1:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi2:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $4, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $8, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $9, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $10, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $11, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $12, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $13, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shrl $15, %edi
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [49376,32928,16480,32,49376,32928,16480,32]
+; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
+; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: bitcast_i32_32i1:
 ; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi0:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi1:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi2:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrl $16, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $2, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $4, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $5, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $6, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $8, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $9, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $10, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $11, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $12, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $13, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shrl $15, %edi
-; AVX2-NEXT: andl $1, %edi
-; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32,49376,32928,16480,32]
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i32_32i1: