Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -4660,7 +4660,8 @@
 }
 
 /// Insert i1-subvector to i1-vector.
-static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
+static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG,
+                                const X86Subtarget &Subtarget) {
 
   SDLoc dl(Op);
   SDValue Vec = Op.getOperand(0);
@@ -4690,43 +4691,65 @@
   // 3. Subvector should be inserted in the middle (for example v2i1
   //    to v16i1, index 2)
 
+  // Extend to a natively supported kshift width.
+  MVT WideOpVT = OpVT;
+  if (Subtarget.hasDQI() && OpVT.getSizeInBits() <= 8)
+    WideOpVT = MVT::v8i1;
+  else if (OpVT.getSizeInBits() <= 16)
+    WideOpVT = MVT::v16i1;
+  else if (!Subtarget.hasDQI() && OpVT == MVT::v32i1)
+    WideOpVT = MVT::v64i1;
+
   SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
-  SDValue Undef = DAG.getUNDEF(OpVT);
-  SDValue WideSubVec =
-    DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef, SubVec, ZeroIdx);
-  if (Vec.isUndef())
-    return DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
-                       DAG.getConstant(IdxVal, dl, MVT::i8));
+  SDValue Undef = DAG.getUNDEF(WideOpVT);
+  SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+                                   Undef, SubVec, ZeroIdx);
+  if (Vec.isUndef()) {
+    SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec, ShiftBits);
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Vec, ZeroIdx);
+  }
 
   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
+    NumElems = WideOpVT.getVectorNumElements();
     unsigned ShiftLeft = NumElems - SubVecNumElems;
     unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
-    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
-                             DAG.getConstant(ShiftLeft, dl, MVT::i8));
-    return ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, OpVT, WideSubVec,
+    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+                             DAG.getConstant(ShiftLeft, dl, MVT::i8));
+    WideSubVec = ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, WideSubVec,
                              DAG.getConstant(ShiftRight, dl, MVT::i8)) : WideSubVec;
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, WideSubVec, ZeroIdx);
   }
 
   if (IdxVal == 0) {
     // Zero lower bits of the Vec
     SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
-    Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
-    Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
-    // Merge them together
-    return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+    // Merge them together; SubVec should be zero extended.
+    WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+                             getZeroVector(WideOpVT, Subtarget, DAG, dl),
+                             SubVec, ZeroIdx);
+    Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Vec, ZeroIdx);
   }
 
   // Simple case when we put subvector in the upper part
   if (IdxVal + SubVecNumElems == NumElems) {
     // Zero upper bits of the Vec
-    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec,
-                             DAG.getConstant(IdxVal, dl, MVT::i8));
+    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+                             DAG.getConstant(IdxVal, dl, MVT::i8));
     SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
-    Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
-    Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
-    return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Vec, ZeroIdx);
   }
 
   // Subvector should be inserted in the middle - use shuffle
+  WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef,
+                           SubVec, ZeroIdx);
   SmallVector<int, 64> Mask;
   for (unsigned i = 0; i < NumElems; ++i)
     Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
@@ -12557,7 +12580,7 @@
     return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
 
   if (OpVT.getVectorElementType() == MVT::i1)
-    return Insert1BitVector(Op, DAG);
+    return Insert1BitVector(Op, DAG, Subtarget);
 
   return SDValue();
 }
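The net effect of the Insert1BitVector change: when the i1 vector type is narrower than the narrowest natively shiftable kmask (8 bits with DQI, 16 bits otherwise), the insert is performed in a widened kmask type and the original width is extracted back out at index 0, so the kshift amounts are computed in the wide type and genuinely clear every bit above the subvector. A minimal IR sketch of the kind of input that reaches this path (function and value names are hypothetical, not taken from the patch):

; Concatenating two <2 x i1> masks yields an INSERT_SUBVECTOR of v2i1
; into v4i1, which is narrower than any native kshift width and is
; therefore widened (to v8i1 with AVX512DQ) before the shift/or sequence.
define <4 x i1> @concat_v2i1(<2 x i1> %lo, <2 x i1> %hi) {
  %res = shufflevector <2 x i1> %lo, <2 x i1> %hi,
                       <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i1> %res
}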
Index: lib/Target/X86/X86InstrAVX512.td
===================================================================
--- lib/Target/X86/X86InstrAVX512.td
+++ lib/Target/X86/X86InstrAVX512.td
@@ -2482,88 +2482,53 @@
   def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
   def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
 }
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
-def : Pat<(v8i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK32:$src, VK8))>;
-def : Pat<(v8i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK64:$src, VK8))>;
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
-          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+// Patterns for kmask insert_subvector/extract_subvector to/from index=0
+multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
+                                             RegisterClass RC, ValueType VT> {
+  def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
+            (subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
+
+  def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
+            (VT (COPY_TO_REGCLASS subRC:$src, RC))>;
+}
+
+defm : operation_subvector_mask_lowering<VK2,  v2i1,  VK4,  v4i1>;
+defm : operation_subvector_mask_lowering<VK2,  v2i1,  VK8,  v8i1>;
+defm : operation_subvector_mask_lowering<VK2,  v2i1,  VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK2,  v2i1,  VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK2,  v2i1,  VK64, v64i1>;
+
+defm : operation_subvector_mask_lowering<VK4,  v4i1,  VK8,  v8i1>;
+defm : operation_subvector_mask_lowering<VK4,  v4i1,  VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK4,  v4i1,  VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK4,  v4i1,  VK64, v64i1>;
+
+defm : operation_subvector_mask_lowering<VK8,  v8i1,  VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK8,  v8i1,  VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK8,  v8i1,  VK64, v64i1>;
+
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK32:$src, VK16))>;
-def : Pat<(v16i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK64:$src, VK16))>;
+defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
+def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
+          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
 def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
           (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
-
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK64:$src, VK32))>;
-
 def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
           (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
-def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
-          (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
-
-def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
-          (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
-
-def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
-          (v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>;
-
-def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
-def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
-
-def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
-
-def : Pat<(v32i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK2:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK4:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK8:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK16:$src, VK32))>;
-
-def : Pat<(v64i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK2:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK4:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK8:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK16:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK32:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK32:$src, VK64))>;
-
-
 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
           (v8i1 (COPY_TO_REGCLASS
                  (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
                  (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
-          (v8i1 (COPY_TO_REGCLASS
-                 (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
-                 (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-
 def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
           (v4i1 (COPY_TO_REGCLASS
                  (KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
                  (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
-def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
-          (v4i1 (COPY_TO_REGCLASS
-                 (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16),
-                 (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
 //===----------------------------------------------------------------------===//
 // AVX-512 - Aligned and unaligned load and store
 //
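The TableGen change collapses the hand-written per-type patterns into one parameterized multiclass: extracting or inserting a kmask subvector at index 0 is only a reinterpretation of the low bits of a mask register, so both directions select to COPY_TO_REGCLASS and no kshift is emitted. The angle-bracket argument lists of the multiclass and its defm instantiations are inferred above from the patterns they replace. Under that reading, an index-0 extract such as the following (hypothetical function name) should become a plain VK16-to-VK8 mask-register copy:

; Low half of a v16i1 mask at index 0: expected to select to a
; register-class copy rather than a shift.
define <8 x i1> @low_half_v16i1(<16 x i1> %m) {
  %lo = shufflevector <16 x i1> %m, <16 x i1> undef,
                      <8 x i32> <i32 0, i32 1, i32 2, i32 3,
                                 i32 4, i32 5, i32 6, i32 7>
  ret <8 x i1> %lo
}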
Index: test/CodeGen/X86/avx512-skx-insert-subvec.ll
===================================================================
--- test/CodeGen/X86/avx512-skx-insert-subvec.ll
+++ test/CodeGen/X86/avx512-skx-insert-subvec.ll
@@ -60,9 +60,12 @@
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
 ; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
+; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $4, %k1, %k1
 ; CHECK-NEXT: kshiftlb $4, %k0, %k0
-; CHECK-NEXT: kshiftrb $4, %k0, %k1
-; CHECK-NEXT: korb %k0, %k1, %k0
+; CHECK-NEXT: kshiftrb $4, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
 ; CHECK-NEXT: vpmovm2w %k0, %xmm0
 ; CHECK-NEXT: retq
@@ -75,9 +78,12 @@
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
 ; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
-; CHECK-NEXT: kshiftlw $2, %k0, %k0
-; CHECK-NEXT: kshiftrw $2, %k0, %k1
-; CHECK-NEXT: korw %k0, %k1, %k0
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
+; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $2, %k1, %k1
+; CHECK-NEXT: kshiftlb $2, %k0, %k0
+; CHECK-NEXT: kshiftrb $2, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
 ; CHECK-NEXT: vpmovm2d %k0, %xmm0
 ; CHECK-NEXT: retq
@@ -90,9 +96,12 @@
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
 ; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
-; CHECK-NEXT: kshiftlw $2, %k0, %k0
-; CHECK-NEXT: kshiftrw $2, %k0, %k1
-; CHECK-NEXT: korw %k0, %k1, %k0
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
+; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $2, %k1, %k1
+; CHECK-NEXT: kshiftlb $2, %k0, %k0
+; CHECK-NEXT: kshiftrb $2, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
 ; CHECK-NEXT: kunpckbw %k0, %k0, %k0
 ; CHECK-NEXT: vpmovm2b %k0, %xmm0
 ; CHECK-NEXT: retq
@@ -106,9 +115,12 @@
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
 ; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
+; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $4, %k1, %k1
 ; CHECK-NEXT: kshiftlb $4, %k0, %k0
-; CHECK-NEXT: kshiftrb $4, %k0, %k1
-; CHECK-NEXT: korb %k0, %k1, %k0
+; CHECK-NEXT: kshiftrb $4, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
 ; CHECK-NEXT: kunpckbw %k0, %k0, %k0
 ; CHECK-NEXT: kunpckwd %k0, %k0, %k0
 ; CHECK-NEXT: vpmovm2b %k0, %ymm0
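In the updated avx512-skx-insert-subvec.ll checks, both operands are now materialized in mask registers: the high subvector is shifted into position with kshiftlb, the low one is zero-extended by a kshiftlb/kshiftrb round trip, and the halves are merged with korb. The CHECK blocks are consistent with concatenation tests of roughly this shape (a sketch only; the function bodies lie outside the hunk context):

; Two v4i1 halves concatenated into v8i1; under the new lowering this
; is the kshiftlb $4 / kshiftrb $4 / korb sequence checked in the
; first hunk above.
define <8 x i1> @concat_v4i1(<4 x i1> %a, <4 x i1> %b) {
  %res = shufflevector <4 x i1> %a, <4 x i1> %b,
                       <8 x i32> <i32 0, i32 1, i32 2, i32 3,
                                  i32 4, i32 5, i32 6, i32 7>
  ret <8 x i1> %res
}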
Index: test/CodeGen/X86/masked_gather_scatter.ll
===================================================================
--- test/CodeGen/X86/masked_gather_scatter.ll
+++ test/CodeGen/X86/masked_gather_scatter.ll
@@ -939,8 +939,8 @@
 ; SKX: # BB#0:
 ; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
 ; SKX-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlb $6, %k0, %k0
+; SKX-NEXT: kshiftrb $6, %k0, %k1
 ; SKX-NEXT: vscatterqps %xmm0, (,%ymm1) {%k1}
 ; SKX-NEXT: retq
 ;
@@ -949,8 +949,8 @@
 ; SKX_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
 ; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX_32-NEXT: kshiftlw $2, %k0, %k0
-; SKX_32-NEXT: kshiftrw $2, %k0, %k1
+; SKX_32-NEXT: kshiftlb $6, %k0, %k0
+; SKX_32-NEXT: kshiftrb $6, %k0, %k1
 ; SKX_32-NEXT: vscatterdps %xmm0, (,%xmm1) {%k1}
 ; SKX_32-NEXT: retl
   call void @llvm.masked.scatter.v2f32(<2 x float> %a1, <2 x float*> %ptr, i32 4, <2 x i1> %mask)
@@ -984,8 +984,8 @@
 ; SKX: # BB#0:
 ; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
 ; SKX-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlb $6, %k0, %k0
+; SKX-NEXT: kshiftrb $6, %k0, %k1
 ; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SKX-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
 ; SKX-NEXT: retq
@@ -994,8 +994,8 @@
 ; SKX_32: # BB#0:
 ; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
 ; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX_32-NEXT: kshiftlw $2, %k0, %k0
-; SKX_32-NEXT: kshiftrw $2, %k0, %k1
+; SKX_32-NEXT: kshiftlb $6, %k0, %k0
+; SKX_32-NEXT: kshiftrb $6, %k0, %k1
 ; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SKX_32-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
 ; SKX_32-NEXT: retl
@@ -1043,8 +1043,8 @@
 ; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
 ; SKX-NEXT: vptestmq %xmm1, %xmm1, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlb $6, %k0, %k0
+; SKX-NEXT: kshiftrb $6, %k0, %k1
 ; SKX-NEXT: vgatherdps (%rdi,%xmm0,4), %xmm2 {%k1}
 ; SKX-NEXT: vmovaps %zmm2, %zmm0
 ; SKX-NEXT: retq
 ;
@@ -1054,8 +1054,8 @@
 ; SKX_32: # BB#0:
 ; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
 ; SKX_32-NEXT: vptestmq %xmm1, %xmm1, %k0
-; SKX_32-NEXT: kshiftlw $2, %k0, %k0
-; SKX_32-NEXT: kshiftrw $2, %k0, %k1
+; SKX_32-NEXT: kshiftlb $6, %k0, %k0
+; SKX_32-NEXT: kshiftrb $6, %k0, %k1
 ; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT: vgatherdps (%eax,%xmm0,4), %xmm2 {%k1}
 ; SKX_32-NEXT: vmovaps %zmm2, %zmm0
Index: test/CodeGen/X86/masked_memop.ll
===================================================================
--- test/CodeGen/X86/masked_memop.ll
+++ test/CodeGen/X86/masked_memop.ll
@@ -707,8 +707,8 @@
 ; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
 ; SKX-NEXT: vmovups %xmm1, (%rdi) {%k1}
 ; SKX-NEXT: retq
   %mask = icmp eq <2 x i32> %trigger, zeroinitializer
@@ -801,8 +801,8 @@
 ; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
 ; SKX-NEXT: vmovups (%rdi), %xmm1 {%k1}
 ; SKX-NEXT: vmovaps %zmm1, %zmm0
 ; SKX-NEXT: retq
@@ -856,8 +856,8 @@
 ; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
 ; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
 ; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1}
 ; SKX-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -903,8 +903,8 @@
 ; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
 ; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z}
 ; SKX-NEXT: retq
   %mask = icmp eq <2 x i32> %trigger, zeroinitializer
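The masked_gather_scatter.ll and masked_memop.ll churn follows directly from the widening rule: for a <2 x i1> mask merged into a zero vector, the clearing shift amount is now the widened element count minus two, so the old kshiftlw/kshiftrw by 2 becomes kshiftlb by 6 (8 - 2) where the byte-wide DQI shift applies and kshiftlw by 14 (16 - 2) where the word-wide shift is kept. The old 2-bit shift pair only cleared the top two bits of the kmask and could leave stale bits between the subvector and the top. A hedged reproduction of the kind of source producing these masks (the intrinsic is declared in the old unsuffixed style this patch's tests already use):

; The <2 x i1> compare result used as a masked-store predicate is
; widened to a full kmask and cleared with a (width - 2) shift pair,
; e.g. the kshiftlw $14 / kshiftrw $14 checked in masked_memop.ll.
declare void @llvm.masked.store.v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)

define void @store_v2i32(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v2i32(<2 x i32> %val, <2 x i32>* %addr, i32 4, <2 x i1> %mask)
  ret void
}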