Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1553,6 +1553,8 @@
     setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Custom);
     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i1, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Custom);
     setOperationAction(ISD::SELECT, MVT::v4i1, Custom);
     setOperationAction(ISD::SELECT, MVT::v2i1, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
@@ -12176,8 +12178,15 @@
       V2 = getOnesVector(ExtVT, Subtarget, DAG, DL);
     else
       V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
-  return DAG.getNode(ISD::TRUNCATE, DL, VT,
-                     DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask));
+
+  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
+  // i1 was sign extended, so we can use X86ISD::CVT2MASK.
+  int NumElems = VT.getVectorNumElements();
+  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
+      (Subtarget.hasDQI() && (NumElems < 32)))
+    return DAG.getNode(X86ISD::CVT2MASK, DL, VT, Shuffle);
+
+  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
 }
 
 /// Helper function that returns true if the shuffle mask should be
@@ -12628,15 +12637,16 @@
                         DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
     return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
   }
-
+  // Use shuffle to insert element.
   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
   SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
-  if (IdxVal)
-    EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
-                           DAG.getConstant(IdxVal, dl, MVT::i8));
-  if (Vec.isUndef())
-    return EltInVec;
-  return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+
+  unsigned NumElems = VecVT.getVectorNumElements();
+  SmallVector<int, 8> MaskVec(NumElems);
+  for (unsigned i = 0; i != NumElems; ++i)
+    MaskVec[i] = (i == IdxVal) ?
NumElems : i; + + return DAG.getVectorShuffle(VecVT, dl, Vec, EltInVec, MaskVec); } SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, Index: test/CodeGen/X86/avx512-ext.ll =================================================================== --- test/CodeGen/X86/avx512-ext.ll +++ test/CodeGen/X86/avx512-ext.ll @@ -1423,15 +1423,35 @@ define i16 @trunc_i32_to_i1(i32 %a) { -; ALL-LABEL: trunc_i32_to_i1: -; ALL: ## BB#0: -; ALL-NEXT: andl $1, %edi -; ALL-NEXT: kmovw %edi, %k0 -; ALL-NEXT: movw $-4, %ax -; ALL-NEXT: kmovw %eax, %k1 -; ALL-NEXT: korw %k0, %k1, %k0 -; ALL-NEXT: kmovw %k0, %eax -; ALL-NEXT: retq +; KNL-LABEL: trunc_i32_to_i1: +; KNL: ## BB#0: +; KNL-NEXT: andl $1, %edi +; KNL-NEXT: kmovw %edi, %k1 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 +; KNL-NEXT: movw $-4, %ax +; KNL-NEXT: kmovw %eax, %k2 +; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z} +; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [16,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; KNL-NEXT: vpermt2d %zmm0, %zmm2, %zmm1 +; KNL-NEXT: vpslld $31, %zmm1, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 +; KNL-NEXT: kmovw %k0, %eax +; KNL-NEXT: retq +; +; SKX-LABEL: trunc_i32_to_i1: +; SKX: ## BB#0: +; SKX-NEXT: andl $1, %edi +; SKX-NEXT: kmovw %edi, %k0 +; SKX-NEXT: movw $-4, %ax +; SKX-NEXT: kmovw %eax, %k1 +; SKX-NEXT: vpmovm2d %k1, %zmm0 +; SKX-NEXT: vpmovm2d %k0, %zmm1 +; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [16,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; SKX-NEXT: vpermt2d %zmm1, %zmm2, %zmm0 +; SKX-NEXT: vpmovd2m %zmm0, %k0 +; SKX-NEXT: kmovw %k0, %eax +; SKX-NEXT: retq %a_i = trunc i32 %a to i1 %maskv = insertelement <16 x i1> , i1 %a_i, i32 0 %res = bitcast <16 x i1> %maskv to i16 Index: test/CodeGen/X86/avx512-insert-extract.ll =================================================================== --- test/CodeGen/X86/avx512-insert-extract.ll +++ test/CodeGen/X86/avx512-insert-extract.ll @@ -271,10 +271,16 @@ ; KNL: ## BB#0: ; KNL-NEXT: cmpl %esi, %edi ; KNL-NEXT: setb %al -; KNL-NEXT: kmovw %eax, %k0 -; KNL-NEXT: movw $-4, %ax ; KNL-NEXT: kmovw %eax, %k1 -; KNL-NEXT: korw %k0, %k1, %k0 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 +; KNL-NEXT: movw $-4, %ax +; KNL-NEXT: kmovw %eax, %k2 +; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z} +; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [16,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; KNL-NEXT: vpermt2d %zmm0, %zmm2, %zmm1 +; KNL-NEXT: vpslld $31, %zmm1, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: retq ; @@ -283,9 +289,13 @@ ; SKX-NEXT: cmpl %esi, %edi ; SKX-NEXT: setb %al ; SKX-NEXT: kmovw %eax, %k0 +; SKX-NEXT: vpmovm2d %k0, %zmm0 ; SKX-NEXT: movw $-4, %ax -; SKX-NEXT: kmovw %eax, %k1 -; SKX-NEXT: korw %k0, %k1, %k0 +; SKX-NEXT: kmovw %eax, %k0 +; SKX-NEXT: vpmovm2d %k0, %zmm1 +; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [16,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; SKX-NEXT: vpermt2d %zmm0, %zmm2, %zmm1 +; SKX-NEXT: vpmovd2m %zmm1, %k0 ; SKX-NEXT: kmovw %k0, %eax ; SKX-NEXT: retq %cmp_res = icmp ult i32 %a, %b @@ -351,10 +361,15 @@ ; KNL: ## BB#0: ; KNL-NEXT: movzbl (%rdi), %eax ; KNL-NEXT: andl $1, %eax -; KNL-NEXT: kmovw %eax, %k0 -; KNL-NEXT: kmovw %esi, %k1 -; KNL-NEXT: kshiftlw $10, %k0, %k0 -; KNL-NEXT: korw %k0, %k1, %k0 +; KNL-NEXT: kmovw %eax, %k1 +; KNL-NEXT: kmovw %esi, %k2 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 +; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z} +; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = 
[0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15] +; KNL-NEXT: vpermt2d %zmm0, %zmm2, %zmm1 +; KNL-NEXT: vpslld $31, %zmm1, %zmm0 +; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: retq ; @@ -364,8 +379,11 @@ ; SKX-NEXT: andl $1, %eax ; SKX-NEXT: kmovd %eax, %k0 ; SKX-NEXT: kmovw %esi, %k1 -; SKX-NEXT: kshiftlw $10, %k0, %k0 -; SKX-NEXT: korw %k0, %k1, %k0 +; SKX-NEXT: vpmovm2d %k1, %zmm0 +; SKX-NEXT: vpmovm2d %k0, %zmm1 +; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15] +; SKX-NEXT: vpermt2d %zmm1, %zmm2, %zmm0 +; SKX-NEXT: vpmovd2m %zmm0, %k0 ; SKX-NEXT: kmovw %k0, %eax ; SKX-NEXT: retq %x = load i1 , i1 * %addr, align 128 @@ -380,10 +398,15 @@ ; KNL: ## BB#0: ; KNL-NEXT: movzbl (%rdi), %eax ; KNL-NEXT: andl $1, %eax -; KNL-NEXT: kmovw %eax, %k0 -; KNL-NEXT: kmovw %esi, %k1 -; KNL-NEXT: kshiftlw $4, %k0, %k0 -; KNL-NEXT: korw %k0, %k1, %k0 +; KNL-NEXT: kmovw %eax, %k1 +; KNL-NEXT: kmovw %esi, %k2 +; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 +; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z} +; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} +; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7] +; KNL-NEXT: vpermt2q %zmm0, %zmm2, %zmm1 +; KNL-NEXT: vpsllq $63, %zmm1, %zmm0 +; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: retq ; @@ -393,8 +416,11 @@ ; SKX-NEXT: andl $1, %eax ; SKX-NEXT: kmovd %eax, %k0 ; SKX-NEXT: kmovb %esi, %k1 -; SKX-NEXT: kshiftlb $4, %k0, %k0 -; SKX-NEXT: korb %k0, %k1, %k0 +; SKX-NEXT: vpmovm2q %k1, %zmm0 +; SKX-NEXT: vpmovm2q %k0, %zmm1 +; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7] +; SKX-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 +; SKX-NEXT: vpmovq2m %zmm0, %k0 ; SKX-NEXT: kmovb %k0, %eax ; SKX-NEXT: retq %x = load i1 , i1 * %addr, align 128 @@ -1011,3 +1037,48 @@ %r = insertelement <32 x i8> %x, i8 %y, i32 20 ret <32 x i8> %r } + +define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32> %y) { +; SKX-LABEL: test_insertelement_v32i1: +; SKX: ## BB#0: +; SKX-NEXT: cmpl %esi, %edi +; SKX-NEXT: setb %al +; SKX-NEXT: kmovw %eax, %k0 +; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1 +; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2 +; SKX-NEXT: kunpckwd %k1, %k2, %k1 +; SKX-NEXT: vpmovm2w %k1, %zmm0 +; SKX-NEXT: vpmovm2w %k0, %zmm1 +; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [0,1,2,3,32,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31] +; SKX-NEXT: vpermt2w %zmm1, %zmm2, %zmm0 +; SKX-NEXT: vpmovw2m %zmm0, %k0 +; SKX-NEXT: kmovd %k0, %eax +; SKX-NEXT: retq + %cmp_res_i1 = icmp ult i32 %a, %b + %cmp_cmp_vec = icmp ult <32 x i32> %x, %y + %maskv = insertelement <32 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 4 + %res = bitcast <32 x i1> %maskv to i32 + ret i32 %res +} + +define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) { +; SKX-LABEL: test_iinsertelement_v4i1: +; SKX: ## BB#0: +; SKX-NEXT: cmpl %esi, %edi +; SKX-NEXT: setb %al +; SKX-NEXT: kmovw %eax, %k0 +; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1 +; SKX-NEXT: vpmovm2d %k1, %xmm0 +; SKX-NEXT: vpmovm2d %k0, %xmm1 +; SKX-NEXT: vpbroadcastq %xmm1, %xmm1 +; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3] +; SKX-NEXT: vpmovd2m %xmm0, %k0 +; SKX-NEXT: kmovb %k0, %eax +; SKX-NEXT: retq + %cmp_res_i1 = icmp ult i32 %a, %b + %cmp_cmp_vec = icmp ult <4 x i32> %x, %y + %maskv = insertelement <4 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 2 + %res0 = shufflevector <4 x i1> %maskv, <4 x i1> undef , <8 x i32> + %res = bitcast <8 x i1> %res0 to i8 + ret i8 %res +} Index: 
test/CodeGen/X86/avx512-mask-op.ll =================================================================== --- test/CodeGen/X86/avx512-mask-op.ll +++ test/CodeGen/X86/avx512-mask-op.ll @@ -540,8 +540,14 @@ ; SKX-NEXT: kmovq %rdi, %k0 ; SKX-NEXT: kxnorw %k0, %k0, %k1 ; SKX-NEXT: kshiftrw $15, %k1, %k1 -; SKX-NEXT: kshiftlq $5, %k1, %k1 -; SKX-NEXT: korq %k1, %k0, %k0 +; SKX-NEXT: vpmovm2b %k1, %zmm0 +; SKX-NEXT: vpsllq $40, %xmm0, %xmm0 +; SKX-NEXT: vpmovm2b %k0, %zmm1 +; SKX-NEXT: vmovdqu8 {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1 +; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; SKX-NEXT: vpmovb2m %zmm0, %k0 ; SKX-NEXT: vpmovm2b %k0, %zmm0 ; SKX-NEXT: retq %a = bitcast i64 %x to <64 x i1> @@ -601,8 +607,14 @@ ; SKX-NEXT: cmpl %edx, %esi ; SKX-NEXT: setg %al ; SKX-NEXT: kmovw %eax, %k1 -; SKX-NEXT: kshiftlq $5, %k1, %k1 -; SKX-NEXT: korq %k1, %k0, %k0 +; SKX-NEXT: vpmovm2b %k1, %zmm0 +; SKX-NEXT: vpsllq $40, %xmm0, %xmm0 +; SKX-NEXT: vpmovm2b %k0, %zmm1 +; SKX-NEXT: vmovdqu8 {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1 +; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; SKX-NEXT: vpmovb2m %zmm0, %k0 ; SKX-NEXT: vpmovm2b %k0, %zmm0 ; SKX-NEXT: retq %a = bitcast i64 %x to <64 x i1> @@ -615,17 +627,25 @@ define <8 x i1> @test18(i8 %a, i16 %y) { ; KNL-LABEL: test18: ; KNL: ## BB#0: -; KNL-NEXT: kmovw %edi, %k0 -; KNL-NEXT: kmovw %esi, %k1 -; KNL-NEXT: kshiftlw $7, %k1, %k2 +; KNL-NEXT: kmovw %edi, %k1 +; KNL-NEXT: kmovw %esi, %k0 +; KNL-NEXT: kshiftlw $7, %k0, %k2 ; KNL-NEXT: kshiftrw $15, %k2, %k2 -; KNL-NEXT: kshiftlw $6, %k1, %k1 -; KNL-NEXT: kshiftrw $15, %k1, %k1 -; KNL-NEXT: kshiftlw $6, %k1, %k1 -; KNL-NEXT: korw %k1, %k0, %k0 -; KNL-NEXT: kshiftlw $7, %k2, %k1 -; KNL-NEXT: korw %k1, %k0, %k1 +; KNL-NEXT: kshiftlw $6, %k0, %k0 +; KNL-NEXT: kshiftrw $15, %k0, %k3 ; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 +; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z} +; KNL-NEXT: vmovdqa64 %zmm0, %zmm2 {%k3} {z} +; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,7] +; KNL-NEXT: vpermt2q %zmm2, %zmm3, %zmm1 +; KNL-NEXT: vpsllq $63, %zmm1, %zmm1 +; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 +; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z} +; KNL-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2} {z} +; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,6,8] +; KNL-NEXT: vpermt2q %zmm2, %zmm3, %zmm1 +; KNL-NEXT: vpsllq $63, %zmm1, %zmm1 +; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vpmovqw %zmm0, %xmm0 ; KNL-NEXT: retq @@ -634,14 +654,20 @@ ; SKX: ## BB#0: ; SKX-NEXT: kmovb %edi, %k0 ; SKX-NEXT: kmovw %esi, %k1 -; SKX-NEXT: kshiftlw $6, %k1, %k2 +; SKX-NEXT: kshiftlw $7, %k1, %k2 ; SKX-NEXT: kshiftrw $15, %k2, %k2 -; SKX-NEXT: kshiftlw $7, %k1, %k1 +; SKX-NEXT: kshiftlw $6, %k1, %k1 ; SKX-NEXT: kshiftrw $15, %k1, %k1 -; SKX-NEXT: kshiftlb $7, %k1, %k1 -; SKX-NEXT: kshiftlb $6, %k2, %k2 -; SKX-NEXT: korb %k2, %k0, %k0 -; SKX-NEXT: korb %k1, %k0, %k0 +; SKX-NEXT: vpmovm2q %k0, %zmm0 +; SKX-NEXT: vpmovm2q %k1, %zmm1 +; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7] +; SKX-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 +; SKX-NEXT: vpmovq2m %zmm0, %k0 +; SKX-NEXT: vpmovm2q %k0, %zmm0 +; SKX-NEXT: 
vpmovm2q %k2, %zmm1 +; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,8] +; SKX-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 +; SKX-NEXT: vpmovq2m %zmm0, %k0 ; SKX-NEXT: vpmovm2w %k0, %xmm0 ; SKX-NEXT: retq %b = bitcast i8 %a to <8 x i1> Index: test/CodeGen/X86/avx512-skx-insert-subvec.ll =================================================================== --- test/CodeGen/X86/avx512-skx-insert-subvec.ll +++ test/CodeGen/X86/avx512-skx-insert-subvec.ll @@ -33,8 +33,7 @@ ; CHECK-NEXT: vpmovm2q %k0, %zmm0 ; CHECK-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1] -; CHECK-NEXT: vpsllq $63, %zmm0, %zmm0 -; CHECK-NEXT: vptestmq %zmm0, %zmm0, %k0 +; CHECK-NEXT: vpmovq2m %zmm0, %k0 ; CHECK-NEXT: vpmovm2w %k0, %xmm0 ; CHECK-NEXT: retq %res = shufflevector <2 x i1> %a, <2 x i1> zeroinitializer, <8 x i32> Index: test/CodeGen/X86/vector-shuffle-v1.ll =================================================================== --- test/CodeGen/X86/vector-shuffle-v1.ll +++ test/CodeGen/X86/vector-shuffle-v1.ll @@ -16,8 +16,7 @@ ; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0 ; VL_BW_DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0 -; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0 ; VL_BW_DQ-NEXT: retq %b = shufflevector <2 x i1> %a, <2 x i1> undef, <2 x i32> @@ -41,8 +40,7 @@ ; VL_BW_DQ-NEXT: kmovb %eax, %k0 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm1 ; VL_BW_DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] -; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0 -; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0 ; VL_BW_DQ-NEXT: retq %b = shufflevector <2 x i1> %a, <2 x i1> , <2 x i32> @@ -62,8 +60,7 @@ ; VL_BW_DQ-NEXT: vptestmd %xmm0, %xmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2d %k0, %xmm0 ; VL_BW_DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0] -; VL_BW_DQ-NEXT: vpslld $31, %xmm0, %xmm0 -; VL_BW_DQ-NEXT: vptestmd %xmm0, %xmm0, %k0 +; VL_BW_DQ-NEXT: vpmovd2m %xmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2d %k0, %xmm0 ; VL_BW_DQ-NEXT: retq %b = shufflevector <4 x i1> %a, <4 x i1> undef, <4 x i32> @@ -90,8 +87,7 @@ ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0] ; VL_BW_DQ-NEXT: vpermq %zmm0, %zmm1, %zmm0 -; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0 ; VL_BW_DQ-NEXT: retq %a2 = icmp eq <8 x i64> %a, %a1 @@ -124,8 +120,7 @@ ; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm1 ; VL_BW_DQ-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0] ; VL_BW_DQ-NEXT: vpermt2d %zmm0, %zmm2, %zmm1 -; VL_BW_DQ-NEXT: vpslld $31, %zmm1, %zmm0 -; VL_BW_DQ-NEXT: vptestmd %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovd2m %zmm1, %k0 ; VL_BW_DQ-NEXT: vpmovm2b %k0, %xmm0 ; VL_BW_DQ-NEXT: retq %a2 = icmp eq <16 x i32> %a, %a1 @@ -151,7 +146,6 @@ ; VL_BW_DQ-NEXT: vpmovm2w %k0, %zmm0 ; VL_BW_DQ-NEXT: vmovdqu16 {{.*#+}} zmm1 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0,3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0] ; VL_BW_DQ-NEXT: vpermw %zmm0, %zmm1, %zmm0 -; VL_BW_DQ-NEXT: vpsllw $15, %zmm0, %zmm0 ; VL_BW_DQ-NEXT: vpmovw2m %zmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2b %k0, %ymm0 ; VL_BW_DQ-NEXT: retq @@ -179,8 +173,7 @@ ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0 ; VL_BW_DQ-NEXT: vextracti64x2 $1, %zmm0, %xmm0 ; VL_BW_DQ-NEXT: vpbroadcastq %xmm0, %zmm0 -; 
VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0 ; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0 ; VL_BW_DQ-NEXT: retq %b = bitcast i8 %a to <8 x i1> @@ -209,8 +202,7 @@ ; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u> ; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 -; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0 ; VL_BW_DQ-NEXT: kmovb %k0, %eax ; VL_BW_DQ-NEXT: retq %b = bitcast i8 %a to <8 x i1> @@ -236,8 +228,7 @@ ; VL_BW_DQ-NEXT: kmovb %edi, %k0 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0 ; VL_BW_DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1] -; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0 ; VL_BW_DQ-NEXT: kmovb %k0, %eax ; VL_BW_DQ-NEXT: retq %b = bitcast i8 %a to <8 x i1> @@ -267,8 +258,7 @@ ; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0] ; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 -; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0 ; VL_BW_DQ-NEXT: kmovb %k0, %eax ; VL_BW_DQ-NEXT: retq %b = bitcast i8 %a to <8 x i1> @@ -298,8 +288,7 @@ ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7] ; VL_BW_DQ-NEXT: vpxord %zmm2, %zmm2, %zmm2 ; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2 -; VL_BW_DQ-NEXT: vpsllq $63, %zmm2, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0 ; VL_BW_DQ-NEXT: kmovb %k0, %eax ; VL_BW_DQ-NEXT: retq %b = bitcast i8 %a to <8 x i1> @@ -333,8 +322,7 @@ ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm1 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,6,1,0,3,7,7,1] ; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 -; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0 ; VL_BW_DQ-NEXT: kmovb %k0, %eax ; VL_BW_DQ-NEXT: retq %b = bitcast i8 %a to <8 x i1> @@ -366,8 +354,7 @@ ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,3,4,5,6,7] ; VL_BW_DQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 ; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2 -; VL_BW_DQ-NEXT: vpsllq $63, %zmm2, %zmm0 -; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0 ; VL_BW_DQ-NEXT: kmovb %k0, %eax ; VL_BW_DQ-NEXT: retq %c = shufflevector <8 x i1> , <8 x i1> %a, <8 x i32> @@ -393,8 +380,7 @@ ; VL_BW_DQ-NEXT: kmovw %edi, %k0 ; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm0 ; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0 -; VL_BW_DQ-NEXT: vpslld $31, %zmm0, %zmm0 -; VL_BW_DQ-NEXT: vptestmd %zmm0, %zmm0, %k0 +; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0 ; VL_BW_DQ-NEXT: kmovw %k0, %eax ; VL_BW_DQ-NEXT: retq %b = bitcast i16 %a to <16 x i1> @@ -444,7 +430,6 @@ ; VL_BW_DQ-NEXT: kmovq %rdi, %k0 ; VL_BW_DQ-NEXT: vpmovm2b %k0, %zmm0 ; VL_BW_DQ-NEXT: vpbroadcastb %xmm0, %zmm0 -; VL_BW_DQ-NEXT: vpsllw $7, %zmm0, %zmm0 ; VL_BW_DQ-NEXT: vpmovb2m %zmm0, %k0 ; VL_BW_DQ-NEXT: kmovq %k0, %rax ; VL_BW_DQ-NEXT: retq
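
Note on the new INSERT_VECTOR_ELT lowering above: for a constant index, the patch now builds a shuffle whose mask takes lane IdxVal from the SCALAR_TO_VECTOR node holding the inserted bit and every other lane from the original vector. A minimal standalone sketch of that mask construction (plain C++, not LLVM code; buildInsertMask and the printed example are illustrative only):

  // Sketch: builds the shuffle mask used by the shuffle-based insert.
  // Mask values NumElems..2*NumElems-1 select from the second shuffle
  // operand, so lane IdxVal takes element 0 of the SCALAR_TO_VECTOR node.
  #include <cstdio>
  #include <vector>

  static std::vector<int> buildInsertMask(unsigned NumElems, unsigned IdxVal) {
    std::vector<int> Mask(NumElems);
    for (unsigned i = 0; i != NumElems; ++i)
      Mask[i] = (i == IdxVal) ? (int)NumElems : (int)i;
    return Mask;
  }

  int main() {
    // Inserting into lane 2 of a v8i1: prints 0 1 8 3 4 5 6 7.
    for (int m : buildInsertMask(8, 2))
      std::printf("%d ", m);
    std::printf("\n");
    return 0;
  }

For a v8i1 insert at index 2 this yields {0,1,8,3,4,5,6,7}, matching the (i == IdxVal) ? NumElems : i rule in the LowerINSERT_VECTOR_ELT hunk, and it is the pattern the new test CHECK lines (vpermt2d/vpermt2q/vpermt2w with a single lane redirected to index NumElems) reflect.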
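A second sketch, on one plausible reason the removed shift+OR sequence is replaced by a lane select: OR can set the target bit but can never clear it, so inserting a false element over a lane that is already true would leave a stale bit, whereas a shuffle fully replaces the lane. Plain C++ over an 8-bit mask; insertByOr and insertBySelect are illustrative helpers, not functions from the patch:

  // Sketch: old-style OR insert vs. select-style insert on a v8i1 mask
  // modeled as a uint8_t.
  #include <cassert>
  #include <cstdint>

  static uint8_t insertByOr(uint8_t Vec, bool Elt, unsigned Idx) {
    return Vec | (uint8_t)(Elt << Idx);          // can only set the lane
  }

  static uint8_t insertBySelect(uint8_t Vec, bool Elt, unsigned Idx) {
    uint8_t Bit = (uint8_t)(1u << Idx);
    return (Vec & ~Bit) | (Elt ? Bit : 0);       // lane Idx fully replaced
  }

  int main() {
    // Inserting a 0 bit into lane 2 of 0b11111111:
    assert(insertByOr(0xFF, false, 2) == 0xFF);      // stale bit survives
    assert(insertBySelect(0xFF, false, 2) == 0xFB);  // lane correctly cleared
    return 0;
  }

The shuffle-based lowering has these select semantics for lane IdxVal, which is consistent with the new tests inserting a compare result into arbitrary lanes of another compare vector.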