Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -34275,6 +34275,31 @@
     // TODO convert SrcUndef to KnownUndef.
     break;
   }
+  case X86ISD::KSHIFTL:
+  case X86ISD::KSHIFTR: {
+    auto *Amt = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+    if (!Amt || Amt->getAPIntValue().uge(NumElts))
+      break;
+    SDValue Src = Op.getOperand(0);
+    bool ShiftLeft = (X86ISD::KSHIFTL == Opc);
+    unsigned ShiftAmt = Amt->getZExtValue();
+    APInt DemandedSrc = ShiftLeft ? DemandedElts.lshr(ShiftAmt)
+                                  : DemandedElts.shl(ShiftAmt);
+    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
+                                   Depth + 1))
+      return true;
+
+    if (ShiftLeft) {
+      KnownUndef = KnownUndef.shl(ShiftAmt);
+      KnownZero = KnownZero.shl(ShiftAmt);
+      KnownZero.setLowBits(ShiftAmt);
+    } else {
+      KnownUndef = KnownUndef.lshr(ShiftAmt);
+      KnownZero = KnownZero.lshr(ShiftAmt);
+      KnownZero.setHighBits(ShiftAmt);
+    }
+    break;
+  }
   case X86ISD::CVTSI2P:
   case X86ISD::CVTUI2P: {
     SDValue Src = Op.getOperand(0);
Index: test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
===================================================================
--- test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
+++ test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
@@ -153,9 +153,7 @@
 ; AVX256VL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,12,13,u,u,8,9,6,7,14,15,14,15,0,1,22,23,28,29,18,19,26,27,22,23,u,u,30,31,16,17]
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm2 {%k1} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm2, %xmm2
-; AVX256VL-NEXT:    kshiftrw $8, %k1, %k1
-; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm3 {%k1} {z}
-; AVX256VL-NEXT:    vpmovdw %ymm3, %xmm3
+; AVX256VL-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX256VL-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
 ; AVX256VL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,2,1]
 ; AVX256VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
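For reviewers, a minimal standalone sketch (not part of the patch) of the property the new KSHIFTL case relies on: when only some elements of the shifted mask are demanded, only the correspondingly shifted-back source elements can affect them, and the elements shifted in are known zero. The 16-element uint16_t masks and the helper names kshiftl/demandedSrcFor below are hypothetical illustrations; the patch itself operates on APInt through the SimplifyDemandedVectorElts machinery.

// Illustration only: demanded-element propagation through a mask shift,
// modelling a 16-bit k-register as a uint16_t.
#include <cassert>
#include <cstdint>

// KSHIFTL on a 16-element mask: result element i comes from source
// element (i - amt); the low 'amt' elements are zero.
static uint16_t kshiftl(uint16_t src, unsigned amt) {
  return amt < 16 ? static_cast<uint16_t>(src << amt) : 0;
}

// If only 'demanded' result elements matter, only these source elements
// matter -- this mirrors DemandedElts.lshr(ShiftAmt) in the patch.
static uint16_t demandedSrcFor(uint16_t demanded, unsigned amt) {
  return static_cast<uint16_t>(demanded >> amt);
}

int main() {
  uint16_t src = 0xBEEF;
  unsigned amt = 8;
  uint16_t demanded = 0xFF00; // caller only reads the high 8 elements

  // Zeroing source elements the result never reads must not change any
  // demanded element of the shifted value.
  uint16_t srcSimplified = src & demandedSrcFor(demanded, amt);
  assert((kshiftl(src, amt) & demanded) ==
         (kshiftl(srcSimplified, amt) & demanded));

  // The low 'amt' result elements are always zero, matching
  // KnownZero.setLowBits(ShiftAmt).
  assert((kshiftl(src, amt) & ((1u << amt) - 1)) == 0);
  return 0;
}

KSHIFTR is the mirror image: the demanded mask is shifted left into the source and KnownZero.setHighBits(ShiftAmt) records the elements shifted in from the top. That propagation is what lets the kshiftrw/vmovdqa32/vpmovdw sequence in the updated test fold down to a single vpcmpeqd all-ones idiom.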