Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1509,12 +1509,20 @@
     setOperationAction(ISD::XOR, MVT::v16i32, Legal);
 
     if (Subtarget->hasCDI()) {
-      setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
+      setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
       setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
-      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i64, Legal);
+      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i64, Legal);
       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i32, Legal);
-      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
+      // Lower the following vectors using legal vector CTLZ instructions.
+      setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
+      setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
+      setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
+      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i8, Custom);
+      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i16, Custom);
+      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i8, Custom);
+
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
     }
 
     if (Subtarget->hasVLX() && Subtarget->hasCDI()) {
@@ -1527,6 +1535,9 @@
       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Legal);
       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Legal);
 
+      setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
+      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i16, Custom);
+
       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
@@ -1605,6 +1616,8 @@
     setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
     setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Legal);
     setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Legal);
+    setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
+    setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Custom);
@@ -1646,6 +1659,13 @@
     if (Subtarget->hasVLX())
       setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
 
+    if (Subtarget->hasCDI()) {
+      setOperationAction(ISD::CTLZ, MVT::v32i16, Custom);
+      setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
+      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i16, Custom);
+      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v64i8, Custom);
+    }
+
     for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
       const MVT VT = (MVT::SimpleValueType)i;
@@ -17359,12 +17379,53 @@
                       ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
 }
 
-static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
+
+static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
+  SDLoc dl(Op);
+  MVT VT = Op.getSimpleValueType();
+  MVT EltVT = VT.getVectorElementType();
+  unsigned NumElems = VT.getVectorNumElements();
+
+  assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
+         "Unsupported element type");
+
+  if (512 < (NumElems * 32)) {
+    // Split the vector; its Lo and Hi parts will be handled in the next iteration.
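+    // For example, v32i16 zero-extends to v32i32, which needs 1024 bits.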
+    SDValue Lo, Hi;
+    std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
+    MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
+
+    Lo = DAG.getNode(Op.getOpcode(), dl, OutVT, Lo);
+    Hi = DAG.getNode(Op.getOpcode(), dl, OutVT, Hi);
+
+    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
+  }
+
+  MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
+
+  assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
+         "Unsupported value type for operation");
+
+  // Use the natively supported vplzcntd instruction.
+  Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
+  SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
+  SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
+  SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
+
+  return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
+}
+
+static SDValue LowerCTLZ(SDValue Op, const X86Subtarget *Subtarget,
+                         SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
   EVT OpVT = VT;
   unsigned NumBits = VT.getSizeInBits();
   SDLoc dl(Op);
 
+  if (VT.isVector() && Subtarget->hasAVX512())
+    return LowerVectorCTLZ_AVX512(Op, DAG);
+
   Op = Op.getOperand(0);
   if (VT == MVT::i8) {
     // Zero extend to i32 since there is not an i8 bsr.
@@ -17394,12 +17454,16 @@
   return Op;
 }
 
-static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, const X86Subtarget *Subtarget,
+                                    SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
   EVT OpVT = VT;
   unsigned NumBits = VT.getSizeInBits();
   SDLoc dl(Op);
 
+  if (VT.isVector() && Subtarget->hasAVX512())
+    return LowerVectorCTLZ_AVX512(Op, DAG);
+
   Op = Op.getOperand(0);
   if (VT == MVT::i8) {
     // Zero extend to i32 since there is not an i8 bsr.
@@ -19412,8 +19476,8 @@
   case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
   case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
   case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
-  case ISD::CTLZ: return LowerCTLZ(Op, DAG);
-  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
+  case ISD::CTLZ: return LowerCTLZ(Op, Subtarget, DAG);
+  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, Subtarget, DAG);
   case ISD::CTTZ:
   case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
   case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
Index: lib/Target/X86/X86InstrAVX512.td
===================================================================
--- lib/Target/X86/X86InstrAVX512.td
+++ lib/Target/X86/X86InstrAVX512.td
@@ -786,6 +786,10 @@
           (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
 def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
           (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+def : Pat<(insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0)),
+          (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+def : Pat<(insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0)),
+          (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
 
 // vextractps - extract 32 bits from XMM
 def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
Index: test/CodeGen/X86/vector-lzcnt-128.ll
===================================================================
--- test/CodeGen/X86/vector-lzcnt-128.ll
+++ test/CodeGen/X86/vector-lzcnt-128.ll
@@ -722,30 +722,10 @@
 ;
 ; AVX512-LABEL: testv8i16:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX512-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX512-NEXT: vplzcntd %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 0)
   ret <8 x i16> %out
@@ -956,30 +936,10 @@
 ;
 ; AVX512-LABEL: testv8i16u:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX512-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX512-NEXT: vplzcntd %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 -1)
   ret <8 x i16> %out
@@ -1474,70 +1434,10 @@
 ;
 ; AVX512-LABEL: testv16i8:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
@@ -1941,70 +1841,10 @@
 ;
 ; AVX512-LABEL: testv16i8u:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
Index: test/CodeGen/X86/vector-lzcnt-256.ll
===================================================================
--- test/CodeGen/X86/vector-lzcnt-256.ll
+++ test/CodeGen/X86/vector-lzcnt-256.ll
@@ -495,56 +495,10 @@
 ;
 ; AVX512-LABEL: testv16i16:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm1, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 0)
   ret <16 x i16> %out
@@ -693,56 +647,10 @@
 ;
 ; AVX512-LABEL: testv16i16u:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm1, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 -1)
   ret <16 x i16> %out
@@ -1085,135 +993,16 @@
 ;
 ; AVX512-LABEL: testv32i8:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512-NEXT: vmovdqa64 {{.*#+}} xmm2 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpsubb %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
@@ -1491,135 +1280,16 @@
 ;
 ; AVX512-LABEL: testv32i8u:
 ; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512-NEXT: vmovdqa64 {{.*#+}} xmm2 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpsubb %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
Index: test/CodeGen/X86/vector-lzcnt-512.ll
===================================================================
--- test/CodeGen/X86/vector-lzcnt-512.ll
+++ test/CodeGen/X86/vector-lzcnt-512.ll
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512BW
 
 define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
 ; ALL-LABEL: testv8i64:
@@ -39,107 +40,31 @@
 define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
 ; ALL-LABEL: testv32i16:
 ; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm0, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm0
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm1, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; ALL-NEXT: vpmovzxwd %ymm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdw %zmm0, %ymm0
+; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; ALL-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; ALL-NEXT: vpmovzxwd %ymm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdw %zmm1, %ymm1
+; ALL-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
   %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 0)
   ret <32 x i16> %out
 }
@@ -147,107 +72,31 @@
 define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
 ; ALL-LABEL: testv32i16u:
 ; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm0, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm0
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm1, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; ALL-NEXT: vpmovzxwd %ymm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdw %zmm0, %ymm0
+; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; ALL-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; ALL-NEXT: vpmovzxwd %ymm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdw %zmm1, %ymm1
+; ALL-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv32i16u:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
   %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 -1)
   ret <32 x i16> %out
 }
@@ -255,267 +104,55 @@
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; ALL-LABEL: testv64i8:
 ; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm0, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm0
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdb %zmm0, %xmm0
+; ALL-NEXT: vpsubb %xmm3, %xmm0, %xmm0
 ; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm1, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; ALL-NEXT: vextractf128 $1, %ymm1, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdb %zmm1, %xmm1
+; ALL-NEXT: vpsubb %xmm3, %xmm1, %xmm1
 ; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv64i8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
   ret <64 x i8> %out
 }
@@ -523,267 +160,55 @@
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; ALL-LABEL: testv64i8u:
 ; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm0, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm0
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdb %zmm0, %xmm0
+; ALL-NEXT: vpsubb %xmm3, %xmm0, %xmm0
 ; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm1, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; ALL-NEXT: vextractf128 $1, %ymm1, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdb %zmm1, %xmm1
+; ALL-NEXT: vpsubb %xmm3, %xmm1, %xmm1
 ; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv64i8u:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
   ret <64 x i8> %out
 }
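
The lowering in LowerVectorCTLZ_AVX512 rests on one arithmetic fact, visible in the test deltas above: zero-extending an i8/i16 lane to i32 adds exactly 32 - EltBits extra leading zeros, so the per-lane vplzcntd result only needs a constant subtraction (the vpsubb/vpsubw by 24 or 16). A minimal scalar sketch of that identity follows; it is illustrative only, not part of the patch, and the helper names Ctlz32/Ctlz8ViaZext are invented for this example.

#include <cassert>
#include <cstdint>

// Leading-zero count of a 32-bit value, using the vplzcntd convention
// that a zero input yields 32 (a defined result, not undef).
static unsigned Ctlz32(uint32_t X) {
  unsigned N = 0;
  for (uint32_t Mask = 0x80000000u; Mask != 0 && (X & Mask) == 0; Mask >>= 1)
    ++N;
  return N;
}

// Models one i8 lane of the lowering: zero-extend to 32 bits, count
// leading zeros, then subtract the 32 - 8 = 24 bias the extension added.
static unsigned Ctlz8ViaZext(uint8_t X) {
  return Ctlz32(X) - (32 - 8);
}

int main() {
  assert(Ctlz8ViaZext(0x80) == 0);
  assert(Ctlz8ViaZext(0x01) == 7);
  assert(Ctlz8ViaZext(0x00) == 8); // zero lanes stay well-defined
  return 0;
}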