Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -15053,6 +15053,42 @@
   }
 }
 
+// Determine if this shuffle can be implemented with a KSHIFT instruction.
+// Returns the shift amount if possible or -1 if not. This is a simplified
+// version of matchVectorShuffleAsShift.
+static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
+                                    int MaskOffset, const APInt &Zeroable) {
+  int Size = Mask.size();
+
+  auto CheckZeros = [&](int Shift, bool Left) {
+    for (int j = 0; j < Shift; ++j)
+      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
+        return false;
+
+    return true;
+  };
+
+  auto MatchShift = [&](int Shift, bool Left) {
+    unsigned Pos = Left ? Shift : 0;
+    unsigned Low = Left ? 0 : Shift;
+    unsigned Len = Size - Shift;
+    if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
+      return false;
+
+    return true;
+  };
+
+  for (int Shift = 1; Shift != Size; ++Shift)
+    for (bool Left : {true, false})
+      if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
+        Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
+        return Shift;
+      }
+
+  return -1;
+}
+
+
 // Lower vXi1 vector shuffles.
 // There is no a dedicated instruction on AVX-512 that shuffles the masks.
 // The only way to shuffle bits is to sign-extend the mask vector to SIMD
@@ -15062,6 +15098,9 @@
                                       const APInt &Zeroable,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
+  assert(Subtarget.hasAVX512() &&
+         "Cannot lower 512-bit vectors w/o basic ISA!");
+
   unsigned NumElts = Mask.size();
 
   // Try to recognize shuffles that are just padding a subvector with zeros.
@@ -15088,9 +15127,23 @@
                        Extract, DAG.getIntPtrConstant(0, DL));
   }
 
+  // Try to match KSHIFTs.
+  // TODO: Support narrower than legal shifts by widening and extracting.
+  if (NumElts >= 16 || (Subtarget.hasDQI() && NumElts == 8)) {
+    unsigned Opcode;
+    int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, 0, Zeroable);
+    if (ShiftAmt >= 0)
+      return DAG.getNode(Opcode, DL, VT, V1,
+                         DAG.getConstant(ShiftAmt, DL, MVT::i8));
+
+    // V1 didn't match, try V2.
+ ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, NumElts, Zeroable); + if (ShiftAmt >= 0) + return DAG.getNode(Opcode, DL, VT, V2, + DAG.getConstant(ShiftAmt, DL, MVT::i8)); + } + - assert(Subtarget.hasAVX512() && - "Cannot lower 512-bit vectors w/o basic ISA!"); MVT ExtVT; switch (VT.SimpleTy) { default: Index: test/CodeGen/X86/avx512-skx-insert-subvec.ll =================================================================== --- test/CodeGen/X86/avx512-skx-insert-subvec.ll +++ test/CodeGen/X86/avx512-skx-insert-subvec.ll @@ -30,11 +30,8 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0 ; CHECK-NEXT: vpmovq2m %xmm0, %k0 -; CHECK-NEXT: vpmovm2d %k0, %ymm0 -; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1] -; CHECK-NEXT: vpmovd2m %ymm0, %k0 +; CHECK-NEXT: kshiftlb $4, %k0, %k0 ; CHECK-NEXT: vpmovm2w %k0, %xmm0 -; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq %res = shufflevector <2 x i1> %a, <2 x i1> zeroinitializer, <8 x i32> ret <8 x i1> %res Index: test/CodeGen/X86/kshift.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/kshift.ll @@ -0,0 +1,725 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=KNL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512dq,avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=SKX + +define i8 @kshiftl_v8i1_1(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftl_v8i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: movb $-2, %al +; KNL-NEXT: kmovw %eax, %k1 +; KNL-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v8i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlb $1, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> %a, <8 x i1> zeroinitializer, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +} + +define i16 @kshiftl_v16i1_1(<16 x i32> %x, <16 x i32> %y) { +; KNL-LABEL: kshiftl_v16i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; KNL-NEXT: kshiftlw $1, %k0, %k1 +; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $ax killed $ax killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v16i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlw $1, %k0, %k1 +; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $ax killed $ax killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <16 x i32> %x, zeroinitializer + %b = shufflevector <16 x i1> %a, <16 x i1> zeroinitializer, <16 x i32> + %c = icmp eq <16 x i32> %y, zeroinitializer + %d = and <16 x i1> %b, %c + %e = bitcast <16 x i1> %d to i16 + ret i16 %e +} + +define i32 @kshiftl_v32i1_1(<32 x i16> %x, <32 x i16> %y) { +; KNL-LABEL: kshiftl_v32i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vpxor 
%xmm4, %xmm4, %xmm4 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm1, %ymm1 +; KNL-NEXT: vpmovsxwd %ymm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0 +; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z} +; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z} +; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm0[15],zmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 +; KNL-NEXT: kshiftlw $1, %k2, %k2 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm0 +; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm2, %ymm1 +; KNL-NEXT: vpmovsxwd %ymm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k2} +; KNL-NEXT: kmovw %k0, %ecx +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: shll $16, %eax +; KNL-NEXT: orl %ecx, %eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v32i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftld $1, %k0, %k1 +; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <32 x i16> %x, zeroinitializer + %b = shufflevector <32 x i1> %a, <32 x i1> zeroinitializer, <32 x i32> + %c = icmp eq <32 x i16> %y, zeroinitializer + %d = and <32 x i1> %b, %c + %e = bitcast <32 x i1> %d to i32 + ret i32 %e +} + +define i64 @kshiftl_v64i1_1(<64 x i8> %x, <64 x i8> %y) { +; KNL-LABEL: kshiftl_v64i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm5 +; KNL-NEXT: vptestmd %zmm5, %zmm5, %k3 +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm0 +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1 +; KNL-NEXT: vpmovsxbd %xmm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k4} {z} +; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z} +; KNL-NEXT: valignd {{.*#+}} zmm1 = zmm0[15],zmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1 +; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z} +; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm1[15],zmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k3} {z} +; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm0[15],zmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4 +; KNL-NEXT: kshiftlw $1, %k3, %k3 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm0 +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1 +; KNL-NEXT: vpmovsxbd %xmm1, %zmm1 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm2, %ymm2 +; KNL-NEXT: vextracti128 $1, %ymm2, %xmm3 +; KNL-NEXT: vpmovsxbd %xmm3, %zmm3 +; KNL-NEXT: vpmovsxbd %xmm2, %zmm2 +; KNL-NEXT: vptestmd %zmm2, %zmm2, %k0 {%k3} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: vptestmd %zmm3, %zmm3, %k0 {%k4} +; KNL-NEXT: kmovw %k0, %ecx +; KNL-NEXT: shll $16, %ecx +; KNL-NEXT: orl %eax, %ecx +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k2} +; KNL-NEXT: kmovw %k0, %edx +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: shll $16, %eax +; KNL-NEXT: orl %edx, %eax +; KNL-NEXT: shlq $32, %rax +; KNL-NEXT: orq %rcx, %rax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: 
kshiftl_v64i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlq $1, %k0, %k1 +; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovq %k0, %rax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <64 x i8> %x, zeroinitializer + %b = shufflevector <64 x i1> %a, <64 x i1> zeroinitializer, <64 x i32> + %c = icmp eq <64 x i8> %y, zeroinitializer + %d = and <64 x i1> %b, %c + %e = bitcast <64 x i1> %d to i64 + ret i64 %e +} + +define i8 @kshiftl_v8i1_7(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftl_v8i1_7: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: movb $-128, %al +; KNL-NEXT: kmovw %eax, %k1 +; KNL-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v8i1_7: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlb $7, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> zeroinitializer, <8 x i1> %a, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +} + +define i16 @kshiftl_v16i1_15(<16 x i32> %x, <16 x i32> %y) { +; KNL-LABEL: kshiftl_v16i1_15: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; KNL-NEXT: kshiftlw $15, %k0, %k1 +; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $ax killed $ax killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v16i1_15: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlw $15, %k0, %k1 +; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $ax killed $ax killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <16 x i32> %x, zeroinitializer + %b = shufflevector <16 x i1> zeroinitializer, <16 x i1> %a, <16 x i32> + %c = icmp eq <16 x i32> %y, zeroinitializer + %d = and <16 x i1> %b, %c + %e = bitcast <16 x i1> %d to i16 + ret i16 %e +} + +define i32 @kshiftl_v32i1_31(<32 x i16> %x, <32 x i16> %y) { +; KNL-LABEL: kshiftl_v32i1_31: +; KNL: # %bb.0: +; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; KNL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 +; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 +; KNL-NEXT: kshiftlw $15, %k0, %k1 +; KNL-NEXT: vpcmpeqw %ymm1, %ymm3, %ymm0 +; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: shll $16, %eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v32i1_31: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftld $31, %k0, %k1 +; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <32 x i16> %x, zeroinitializer + %b = shufflevector <32 x i1> zeroinitializer, <32 x i1> %a, <32 x i32> + %c = icmp eq <32 x i16> %y, zeroinitializer + %d = and <32 x i1> %b, %c + %e = bitcast <32 x i1> %d to i32 + ret i32 %e +} + +define i64 @kshiftl_v64i1_63(<64 x i8> %x, <64 x i8> %y) { +; KNL-LABEL: kshiftl_v64i1_63: +; KNL: # 
%bb.0: +; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; KNL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 +; KNL-NEXT: kshiftlw $15, %k0, %k1 +; KNL-NEXT: vpcmpeqb %ymm1, %ymm3, %ymm0 +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: movzwl %ax, %eax +; KNL-NEXT: shlq $48, %rax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v64i1_63: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlq $63, %k0, %k1 +; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovq %k0, %rax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <64 x i8> %x, zeroinitializer + %b = shufflevector <64 x i1> zeroinitializer, <64 x i1> %a, <64 x i32> + %c = icmp eq <64 x i8> %y, zeroinitializer + %d = and <64 x i1> %b, %c + %e = bitcast <64 x i1> %d to i64 + ret i64 %e +} + +define i8 @kshiftr_v8i1_1(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftr_v8i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z} +; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,2,3,4,5,6,7,15] +; KNL-NEXT: vpermi2q %zmm0, %zmm2, %zmm3 +; KNL-NEXT: vptestmq %zmm3, %zmm3, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v8i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrb $1, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> %a, <8 x i1> zeroinitializer, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +} + +define i16 @kshiftr_v16i1_1(<16 x i32> %x, <16 x i32> %y) { +; KNL-LABEL: kshiftr_v16i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; KNL-NEXT: kshiftrw $1, %k0, %k1 +; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $ax killed $ax killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v16i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrw $1, %k0, %k1 +; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $ax killed $ax killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <16 x i32> %x, zeroinitializer + %b = shufflevector <16 x i1> %a, <16 x i1> zeroinitializer, <16 x i32> + %c = icmp eq <16 x i32> %y, zeroinitializer + %d = and <16 x i1> %b, %c + %e = bitcast <16 x i1> %d to i16 + ret i16 %e +} + +define i32 @kshiftr_v32i1_1(<32 x i16> %x, <32 x i16> %y) { +; KNL-LABEL: kshiftr_v32i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm1, %ymm1 +; KNL-NEXT: vpmovsxwd %ymm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0 +; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z} +; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z} +; KNL-NEXT: valignd {{.*#+}} zmm0 = 
zmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0] +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2 +; KNL-NEXT: kshiftrw $1, %k1, %k1 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm2, %ymm0 +; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 +; KNL-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm1 +; KNL-NEXT: vpmovsxwd %ymm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %ecx +; KNL-NEXT: shll $16, %ecx +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k2} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: orl %ecx, %eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v32i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrd $1, %k0, %k1 +; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <32 x i16> %x, zeroinitializer + %b = shufflevector <32 x i1> %a, <32 x i1> zeroinitializer, <32 x i32> + %c = icmp eq <32 x i16> %y, zeroinitializer + %d = and <32 x i1> %b, %c + %e = bitcast <32 x i1> %d to i32 + ret i32 %e +} + +define i64 @kshiftr_v64i1_1(<64 x i8> %x, <64 x i8> %y) { +; KNL-LABEL: kshiftr_v64i1_1: +; KNL: # %bb.0: +; KNL-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm1 +; KNL-NEXT: vextracti128 $1, %ymm1, %xmm5 +; KNL-NEXT: vpmovsxbd %xmm5, %zmm5 +; KNL-NEXT: vptestmd %zmm5, %zmm5, %k3 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm5 +; KNL-NEXT: vptestmd %zmm5, %zmm5, %k2 +; KNL-NEXT: vpmovsxbd %xmm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1 +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k4} {z} +; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z} +; KNL-NEXT: valignd {{.*#+}} zmm5 = zmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0] +; KNL-NEXT: vptestmd %zmm5, %zmm5, %k1 +; KNL-NEXT: vpternlogd $255, %zmm5, %zmm5, %zmm5 {%k2} {z} +; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm5[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0] +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k3} {z} +; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0] +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4 +; KNL-NEXT: kshiftrw $1, %k3, %k3 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm2, %ymm0 +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1 +; KNL-NEXT: vpmovsxbd %xmm1, %zmm1 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm2 +; KNL-NEXT: vpmovsxbd %xmm2, %zmm3 +; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2 +; KNL-NEXT: vpmovsxbd %xmm2, %zmm2 +; KNL-NEXT: vptestmd %zmm2, %zmm2, %k0 {%k3} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: shll $16, %eax +; KNL-NEXT: vptestmd %zmm3, %zmm3, %k0 {%k4} +; KNL-NEXT: kmovw %k0, %ecx +; KNL-NEXT: orl %eax, %ecx +; KNL-NEXT: shlq $32, %rcx +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k2} +; KNL-NEXT: kmovw %k0, %edx +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: shll $16, %eax +; KNL-NEXT: orl %edx, %eax +; KNL-NEXT: orq %rcx, %rax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v64i1_1: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrq $1, %k0, %k1 +; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovq %k0, %rax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <64 x i8> %x, zeroinitializer + %b = shufflevector <64 x i1> %a, <64 x i1> zeroinitializer, <64 x i32> + %c = icmp eq <64 x i8> %y, zeroinitializer + %d = and <64 x i1> %b, 
%c + %e = bitcast <64 x i1> %d to i64 + ret i64 %e +} + +define i8 @kshiftr_v8i1_7(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftr_v8i1_7: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: movb $-2, %al +; KNL-NEXT: kmovw %eax, %k1 +; KNL-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v8i1_7: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlb $1, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> %a, <8 x i1> zeroinitializer, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +} + +define i16 @kshiftr_v16i1_15(<16 x i32> %x, <16 x i32> %y) { +; KNL-LABEL: kshiftr_v16i1_15: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; KNL-NEXT: kshiftrw $15, %k0, %k1 +; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $ax killed $ax killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v16i1_15: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrw $15, %k0, %k1 +; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $ax killed $ax killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <16 x i32> %x, zeroinitializer + %b = shufflevector <16 x i1> zeroinitializer, <16 x i1> %a, <16 x i32> + %c = icmp eq <16 x i32> %y, zeroinitializer + %d = and <16 x i1> %b, %c + %e = bitcast <16 x i1> %d to i16 + ret i16 %e +} + +define i32 @kshiftr_v32i1_31(<32 x i16> %x, <32 x i16> %y) { +; KNL-LABEL: kshiftr_v32i1_31: +; KNL: # %bb.0: +; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; KNL-NEXT: vpcmpeqw %ymm0, %ymm1, %ymm1 +; KNL-NEXT: vpmovsxwd %ymm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; KNL-NEXT: kshiftrw $15, %k0, %k1 +; KNL-NEXT: vpcmpeqw %ymm0, %ymm2, %ymm0 +; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v32i1_31: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrd $31, %k0, %k1 +; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <32 x i16> %x, zeroinitializer + %b = shufflevector <32 x i1> zeroinitializer, <32 x i1> %a, <32 x i32> + %c = icmp eq <32 x i16> %y, zeroinitializer + %d = and <32 x i1> %b, %c + %e = bitcast <32 x i1> %d to i32 + ret i32 %e +} + +define i64 @kshiftr_v64i1_63(<64 x i8> %x, <64 x i8> %y) { +; KNL-LABEL: kshiftr_v64i1_63: +; KNL: # %bb.0: +; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; KNL-NEXT: vpcmpeqb %ymm0, %ymm1, %ymm1 +; KNL-NEXT: vextracti128 $1, %ymm1, %xmm1 +; KNL-NEXT: vpmovsxbd %xmm1, %zmm1 +; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; KNL-NEXT: kshiftrw $15, %k0, %k1 +; KNL-NEXT: vpcmpeqb %ymm0, %ymm2, %ymm0 +; KNL-NEXT: vpmovsxbd %xmm0, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: movzwl %ax, %eax +; KNL-NEXT: vzeroupper 
+; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v64i1_63: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrq $63, %k0, %k1 +; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovq %k0, %rax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <64 x i8> %x, zeroinitializer + %b = shufflevector <64 x i1> zeroinitializer, <64 x i1> %a, <64 x i32> + %c = icmp eq <64 x i8> %y, zeroinitializer + %d = and <64 x i1> %b, %c + %e = bitcast <64 x i1> %d to i64 + ret i64 %e +} + +define i8 @kshiftl_v8i1_zu123u56(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftl_v8i1_zu123u56: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z} +; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = <8,u,1,2,3,u,5,6> +; KNL-NEXT: vpermi2q %zmm0, %zmm2, %zmm3 +; KNL-NEXT: vpsllq $63, %zmm3, %zmm0 +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v8i1_zu123u56: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlb $1, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> %a, <8 x i1> zeroinitializer, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +} + +define i8 @kshiftl_v8i1_u0123456(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftl_v8i1_u0123456: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: valignq {{.*#+}} zmm0 = zmm0[7,0,1,2,3,4,5,6] +; KNL-NEXT: vpsllq $63, %zmm0, %zmm0 +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftl_v8i1_u0123456: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftlb $1, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +} + +define i8 @kshiftr_v8i1_1u3u567z(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftr_v8i1_1u3u567z: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z} +; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = <1,u,3,u,5,6,7,15> +; KNL-NEXT: vpermi2q %zmm0, %zmm2, %zmm3 +; KNL-NEXT: vpsllq $63, %zmm3, %zmm0 +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v8i1_1u3u567z: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrb $1, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al 
killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> %a, <8 x i1> zeroinitializer, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +} + +define i8 @kshiftr_v8i1_234567uu(<8 x i64> %x, <8 x i64> %y) { +; KNL-LABEL: kshiftr_v8i1_234567uu: +; KNL: # %bb.0: +; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,4,5,6,7,0,1] +; KNL-NEXT: vpsllq $63, %zmm0, %zmm0 +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 +; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax +; KNL-NEXT: vzeroupper +; KNL-NEXT: retq +; +; SKX-LABEL: kshiftr_v8i1_234567uu: +; SKX: # %bb.0: +; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 +; SKX-NEXT: kshiftrb $2, %k0, %k1 +; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1} +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq + %a = icmp eq <8 x i64> %x, zeroinitializer + %b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> + %c = icmp eq <8 x i64> %y, zeroinitializer + %d = and <8 x i1> %b, %c + %e = bitcast <8 x i1> %d to i8 + ret i8 %e +}