[AVX512] Fix insertelement i1 lowering.
1. Use a shuffle to insert an i1 element into a vector. The previous implementation was incorrect: it computed dest_bit OR src_bit, which fails to clear the destination bit when src_bit = 0.
2. Improve i1 vector shuffles: when the subtarget supports it, use CVT2MASK instead of TRUNCATE.
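
To make the bug concrete, here is a minimal scalar sketch (an editorial annotation, not the DAG lowering itself; names are illustrative). The old lowering reduced to OR-ing the shifted source bit into the destination mask, and an OR can only set bits; a correct insert must first clear the target slot. The commit achieves the clearing with shift pairs at the endpoints and a shuffle for middle positions, as shown in the lowering below.

#include <cassert>
#include <cstdint>

// Old behaviour, reduced to scalar bit math: OR alone cannot write a 0.
uint32_t insertBitOld(uint32_t Mask, bool Bit, unsigned Idx) {
  return Mask | (uint32_t(Bit) << Idx);
}

// Required behaviour: clear the slot, then OR the new bit in.
uint32_t insertBitFixed(uint32_t Mask, bool Bit, unsigned Idx) {
  return (Mask & ~(1u << Idx)) | (uint32_t(Bit) << Idx);
}

int main() {
  // Inserting a 0 over a 1 exposes the bug.
  assert(insertBitFixed(0xF, /*Bit=*/false, /*Idx=*/2) == 0xB);
  assert(insertBitOld(0xF, /*Bit=*/false, /*Idx=*/2) == 0xF); // bit stays set: wrong
}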

Differential Revision: http://reviews.llvm.org/D23347

llvm-svn: 278623
Igor Breger committed Aug 14, 2016
1 parent 98541b0 commit 8672408
Showing 7 changed files with 222 additions and 74 deletions.
54 changes: 48 additions & 6 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1550,6 +1550,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setOperationAction(ISD::TRUNCATE, VT, Custom);
   setOperationAction(ISD::SETCC, VT, Custom);
   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
   setOperationAction(ISD::SELECT, VT, Custom);
   setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
@@ -12178,8 +12179,15 @@ static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
     V2 = getOnesVector(ExtVT, Subtarget, DAG, DL);
   else
     V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
-  return DAG.getNode(ISD::TRUNCATE, DL, VT,
-                     DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask));
+
+  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
+  // The i1 elements were sign-extended, so we can use X86ISD::CVT2MASK.
+  int NumElems = VT.getVectorNumElements();
+  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
+      (Subtarget.hasDQI() && (NumElems < 32)))
+    return DAG.getNode(X86ISD::CVT2MASK, DL, VT, Shuffle);
+
+  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
 }

 /// Helper function that returns true if the shuffle mask should be
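
Editorial note on why CVT2MASK is legal here: both shuffle operands are sign-extended i1 vectors, so every lane is all-zeros or all-ones. X86ISD::CVT2MASK selects the vpmov*2m family (the byte/word forms need BWI, the dword/qword forms need DQI, hence the predicate), and those instructions read only each lane's sign bit, which matches what the TRUNCATE path computes with a shift-plus-test. A scalar sketch of the equivalence for 64-bit lanes, with illustrative names:

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Old sequence: TRUNCATE, materialized as vpsllq $63 + vptestmq (keep bit 0).
bool maskBitViaTest(uint64_t Lane) { return (Lane << 63) != 0; }
// New sequence: vpmovq2m reads the lane's sign bit directly.
bool maskBitViaSign(uint64_t Lane) { return (Lane >> 63) != 0; }

int main() {
  // Sign-extended i1 lanes take only these two values, and the results agree.
  for (uint64_t Lane : {uint64_t(0), ~uint64_t(0)})
    assert(maskBitViaTest(Lane) == maskBitViaSign(Lane));
}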
@@ -12635,12 +12643,46 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {

   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
   SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
-  if (IdxVal)
+  unsigned NumElems = VecVT.getVectorNumElements();
+
+  if (Vec.isUndef()) {
+    if (IdxVal)
+      EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
+                             DAG.getConstant(IdxVal, dl, MVT::i8));
+    return EltInVec;
+  }
+
+  // Insertion of one bit into first or last position
+  // can be done with two SHIFTs + OR.
+  if (IdxVal == 0) {
+    // EltInVec already at correct index and other bits are 0.
+    // Clean the first bit in source vector.
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
+                      DAG.getConstant(1, dl, MVT::i8));
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
+                      DAG.getConstant(1, dl, MVT::i8));
+
+    return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+  }
+  if (IdxVal == NumElems - 1) {
+    // Move the bit to the last position inside the vector.
     EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
                            DAG.getConstant(IdxVal, dl, MVT::i8));
-  if (Vec.isUndef())
-    return EltInVec;
-  return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+    // Clean the last bit in the source vector.
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
+                      DAG.getConstant(1, dl, MVT::i8));
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
+                      DAG.getConstant(1, dl, MVT::i8));
+
+    return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+  }
+
+  // Use shuffle to insert element.
+  SmallVector<int, 64> MaskVec(NumElems);
+  for (unsigned i = 0; i != NumElems; ++i)
+    MaskVec[i] = (i == IdxVal) ? NumElems : i;
+
+  return DAG.getVectorShuffle(VecVT, dl, Vec, EltInVec, MaskVec);
 }

 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
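
Editorial summary of the new strategy, as a scalar model (illustrative C++, not LLVM code), fixing a 16-element mask (v16i1 in a 16-bit k register): the endpoints are handled with shift pairs that clear the target slot before the OR, and any middle index falls through to the shuffle, whose mask value NumElems selects element 0 of EltInVec.

#include <cassert>
#include <cstdint>

uint16_t insertMaskBit(uint16_t Vec, bool Elt, unsigned IdxVal) {
  uint16_t EltInVec = uint16_t(Elt);        // SCALAR_TO_VECTOR: new bit at index 0
  if (IdxVal == 0) {
    Vec = uint16_t(Vec >> 1);               // VSRLI: drop the old bit 0
    Vec = uint16_t(Vec << 1);               // VSHLI: slot 0 is now clear
    return Vec | EltInVec;
  }
  if (IdxVal == 15) {                       // NumElems - 1
    EltInVec = uint16_t(EltInVec << 15);    // move the new bit to the last lane
    Vec = uint16_t(Vec << 1);               // VSHLI: drop the old bit 15
    Vec = uint16_t(Vec >> 1);               // VSRLI: slot 15 is now clear
    return Vec | EltInVec;
  }
  uint16_t Res = 0;                         // shuffle: lane IdxVal from EltInVec,
  for (unsigned i = 0; i != 16; ++i) {      // all other lanes from Vec
    bool Bit = (i == IdxVal) ? (EltInVec & 1) != 0 : ((Vec >> i) & 1) != 0;
    Res = uint16_t(Res | (uint16_t(Bit) << i));
  }
  return Res;
}

int main() {
  assert(insertMaskBit(0xFFFF, false, 0) == 0xFFFE);  // clears the first bit
  assert(insertMaskBit(0xFFFF, false, 15) == 0x7FFF); // clears the last bit
  assert(insertMaskBit(0x0000, true, 7) == 0x0080);   // sets a middle bit
}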
27 changes: 19 additions & 8 deletions llvm/lib/Target/X86/X86InstrAVX512.td
@@ -2503,6 +2503,8 @@ defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
 let Predicates = [HasAVX512] in {
   def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
+  def : Pat<(v4i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK4)>;
+  def : Pat<(v2i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK2)>;
   def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
   def : Pat<(v4i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK4)>;
   def : Pat<(v2i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK2)>;
@@ -2556,15 +2558,24 @@ def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
 def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
           (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;

-def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
-          (v8i1 (COPY_TO_REGCLASS
-                 (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
-                 (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;

-def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
-          (v4i1 (COPY_TO_REGCLASS
-                 (KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
-                 (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
+// Patterns for kmask shift
+multiclass mask_shift_lowering<RegisterClass RC, ValueType VT> {
+  def : Pat<(VT (X86vshli RC:$src, (i8 imm:$imm))),
+            (VT (COPY_TO_REGCLASS
+                 (KSHIFTLWri (COPY_TO_REGCLASS RC:$src, VK16),
+                  (I8Imm $imm)),
+                 RC))>;
+  def : Pat<(VT (X86vsrli RC:$src, (i8 imm:$imm))),
+            (VT (COPY_TO_REGCLASS
+                 (KSHIFTRWri (COPY_TO_REGCLASS RC:$src, VK16),
+                  (I8Imm $imm)),
+                 RC))>;
+}
+
+defm : mask_shift_lowering<VK8, v8i1>, Requires<[HasAVX512, NoDQI]>;
+defm : mask_shift_lowering<VK4, v4i1>, Requires<[HasAVX512]>;
+defm : mask_shift_lowering<VK2, v2i1>, Requires<[HasAVX512]>;
 //===----------------------------------------------------------------------===//
 // AVX-512 - Aligned and unaligned load and store
 //
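
Editorial note: there is no 8-, 4-, or 2-lane kshift without DQI, so all of these patterns promote the mask to the 16-bit register class, shift at word width, and copy back. A scalar sketch of that round trip (illustrative C++, not the TableGen semantics themselves):

#include <cstdint>

// v8i1 shift-left modeled on a promoted 16-bit mask register.
uint8_t vshli_v8i1(uint8_t Mask, unsigned Imm) {
  uint16_t Wide = Mask;           // COPY_TO_REGCLASS VK8 -> VK16
  Wide = uint16_t(Wide << Imm);   // KSHIFTLWri
  return uint8_t(Wide);           // COPY_TO_REGCLASS VK16 -> VK8
}

// v8i1 shift-right. This model assumes the upper bits of the promoted
// register are zero; a real k-register may hold stale bits there.
uint8_t vsrli_v8i1(uint8_t Mask, unsigned Imm) {
  uint16_t Wide = Mask;
  Wide = uint16_t(Wide >> Imm);   // KSHIFTRWri
  return uint8_t(Wide);
}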
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/X86/avx512-ext.ll
@@ -1429,6 +1429,8 @@ define i16 @trunc_i32_to_i1(i32 %a) {
 ; ALL-NEXT: kmovw %edi, %k0
 ; ALL-NEXT: movw $-4, %ax
 ; ALL-NEXT: kmovw %eax, %k1
+; ALL-NEXT: kshiftrw $1, %k1, %k1
+; ALL-NEXT: kshiftlw $1, %k1, %k1
 ; ALL-NEXT: korw %k0, %k1, %k0
 ; ALL-NEXT: kmovw %k0, %eax
 ; ALL-NEXT: retq
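
Editorial note on the two new kshift instructions: this is the IdxVal == 0 path of the new lowering, which clears bit 0 of the existing mask before the korw. With this test's constant the pair happens to be a no-op, since $-4 already has bit 0 clear, but the generic lowering emits it regardless. A worked example with illustrative values:

#include <cassert>
#include <cstdint>

int main() {
  uint16_t K1 = 0xFFFC;        // movw $-4, then kmovw into k1
  K1 = uint16_t(K1 >> 1);      // kshiftrw $1 -> 0x7FFE
  K1 = uint16_t(K1 << 1);      // kshiftlw $1 -> 0xFFFC, bit 0 guaranteed clear
  assert((K1 & 1) == 0);       // slot 0 is free for the korw that follows
}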
110 changes: 98 additions & 12 deletions llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -274,6 +274,8 @@ define i16 @test13(i32 %a, i32 %b) {
 ; KNL-NEXT: kmovw %eax, %k0
 ; KNL-NEXT: movw $-4, %ax
 ; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftrw $1, %k1, %k1
+; KNL-NEXT: kshiftlw $1, %k1, %k1
 ; KNL-NEXT: korw %k0, %k1, %k0
 ; KNL-NEXT: kmovw %k0, %eax
 ; KNL-NEXT: retq
@@ -285,6 +287,8 @@ define i16 @test13(i32 %a, i32 %b) {
 ; SKX-NEXT: kmovw %eax, %k0
 ; SKX-NEXT: movw $-4, %ax
 ; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kshiftrw $1, %k1, %k1
+; SKX-NEXT: kshiftlw $1, %k1, %k1
 ; SKX-NEXT: korw %k0, %k1, %k0
 ; SKX-NEXT: kmovw %k0, %eax
 ; SKX-NEXT: retq
@@ -351,10 +355,15 @@ define i16 @test16(i1 *%addr, i16 %a) {
 ; KNL: ## BB#0:
 ; KNL-NEXT: movzbl (%rdi), %eax
 ; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $10, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
+; KNL-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
+; KNL-NEXT: vpslld $31, %zmm1, %zmm0
+; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
 ; KNL-NEXT: retq
 ;
@@ -364,8 +373,11 @@ define i16 @test16(i1 *%addr, i16 %a) {
 ; SKX-NEXT: andl $1, %eax
 ; SKX-NEXT: kmovd %eax, %k0
 ; SKX-NEXT: kmovw %esi, %k1
-; SKX-NEXT: kshiftlw $10, %k0, %k0
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: vpmovm2d %k1, %zmm0
+; SKX-NEXT: vpmovm2d %k0, %zmm1
+; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
+; SKX-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovd2m %zmm0, %k0
 ; SKX-NEXT: kmovw %k0, %eax
 ; SKX-NEXT: retq
 %x = load i1 , i1 * %addr, align 128
@@ -380,10 +392,15 @@ define i8 @test17(i1 *%addr, i8 %a) {
 ; KNL: ## BB#0:
 ; KNL-NEXT: movzbl (%rdi), %eax
 ; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $4, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
+; KNL-NEXT: vpermt2q %zmm0, %zmm2, %zmm1
+; KNL-NEXT: vpsllq $63, %zmm1, %zmm0
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
 ; KNL-NEXT: retq
 ;
@@ -393,8 +410,11 @@ define i8 @test17(i1 *%addr, i8 %a) {
 ; SKX-NEXT: andl $1, %eax
 ; SKX-NEXT: kmovd %eax, %k0
 ; SKX-NEXT: kmovb %esi, %k1
-; SKX-NEXT: kshiftlb $4, %k0, %k0
-; SKX-NEXT: korb %k0, %k1, %k0
+; SKX-NEXT: vpmovm2q %k1, %zmm0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
+; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
+; SKX-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovq2m %zmm0, %k0
 ; SKX-NEXT: kmovb %k0, %eax
 ; SKX-NEXT: retq
 %x = load i1 , i1 * %addr, align 128
@@ -1012,6 +1032,72 @@ define <32 x i8> @test_insert_128_v32i8(<32 x i8> %x, i8 %y) {
 ret <32 x i8> %r
 }

+define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32> %y) {
+; SKX-LABEL: test_insertelement_v32i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1
+; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2
+; SKX-NEXT: kunpckwd %k1, %k2, %k1
+; SKX-NEXT: vpmovm2w %k1, %zmm0
+; SKX-NEXT: vpmovm2w %k0, %zmm1
+; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [0,1,2,3,32,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; SKX-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovw2m %zmm0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: retq
+  %cmp_res_i1 = icmp ult i32 %a, %b
+  %cmp_cmp_vec = icmp ult <32 x i32> %x, %y
+  %maskv = insertelement <32 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 4
+  %res = bitcast <32 x i1> %maskv to i32
+  ret i32 %res
+}
+
+define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) {
+; SKX-LABEL: test_iinsertelement_v4i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1
+; SKX-NEXT: vpmovm2d %k1, %xmm0
+; SKX-NEXT: vpmovm2d %k0, %xmm1
+; SKX-NEXT: vpbroadcastq %xmm1, %xmm1
+; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; SKX-NEXT: vpmovd2m %xmm0, %k0
+; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: retq
+  %cmp_res_i1 = icmp ult i32 %a, %b
+  %cmp_cmp_vec = icmp ult <4 x i32> %x, %y
+  %maskv = insertelement <4 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 2
+  %res0 = shufflevector <4 x i1> %maskv, <4 x i1> undef , <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
+  %res = bitcast <8 x i1> %res0 to i8
+  ret i8 %res
+}
+
+define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) {
+; SKX-LABEL: test_iinsertelement_v2i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; SKX-NEXT: kshiftlw $1, %k1, %k1
+; SKX-NEXT: kshiftrw $1, %k1, %k1
+; SKX-NEXT: kshiftlw $1, %k0, %k0
+; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: retq
+  %cmp_res_i1 = icmp ult i32 %a, %b
+  %cmp_cmp_vec = icmp ult <2 x i64> %x, %y
+  %maskv = insertelement <2 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 1
+  %res0 = shufflevector <2 x i1> %maskv, <2 x i1> undef , <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+  %res = bitcast <8 x i1> %res0 to i8
+  ret i8 %res
+}
+
 define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
 ; KNL-LABEL: test_extractelement_v2i1:
 ; KNL: ## BB#0:
59 changes: 41 additions & 18 deletions llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -540,8 +540,14 @@ define <64 x i8> @test16(i64 %x) {
 ; SKX-NEXT: kmovq %rdi, %k0
 ; SKX-NEXT: kxnorw %k0, %k0, %k1
 ; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kshiftlq $5, %k1, %k1
-; SKX-NEXT: korq %k1, %k0, %k0
+; SKX-NEXT: vpmovm2b %k1, %zmm0
+; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
+; SKX-NEXT: vpmovm2b %k0, %zmm1
+; SKX-NEXT: vmovdqu8 {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: vpmovb2m %zmm0, %k0
 ; SKX-NEXT: vpmovm2b %k0, %zmm0
 ; SKX-NEXT: retq
 %a = bitcast i64 %x to <64 x i1>
@@ -601,8 +607,14 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
 ; SKX-NEXT: cmpl %edx, %esi
 ; SKX-NEXT: setg %al
 ; SKX-NEXT: kmovw %eax, %k1
-; SKX-NEXT: kshiftlq $5, %k1, %k1
-; SKX-NEXT: korq %k1, %k0, %k0
+; SKX-NEXT: vpmovm2b %k1, %zmm0
+; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
+; SKX-NEXT: vpmovm2b %k0, %zmm1
+; SKX-NEXT: vmovdqu8 {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: vpmovb2m %zmm0, %k0
 ; SKX-NEXT: vpmovm2b %k0, %zmm0
 ; SKX-NEXT: retq
 %a = bitcast i64 %x to <64 x i1>
@@ -615,17 +627,23 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
 define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-LABEL: test18:
 ; KNL: ## BB#0:
-; KNL-NEXT: kmovw %edi, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $7, %k1, %k2
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: kshiftlw $7, %k2, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kshiftlw $6, %k2, %k2
 ; KNL-NEXT: kshiftrw $15, %k2, %k2
-; KNL-NEXT: kshiftlw $6, %k1, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kshiftlw $6, %k1, %k1
-; KNL-NEXT: korw %k1, %k0, %k0
-; KNL-NEXT: kshiftlw $7, %k2, %k1
-; KNL-NEXT: korw %k1, %k0, %k1
 ; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z}
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,7]
+; KNL-NEXT: vpermt2q %zmm2, %zmm3, %zmm1
+; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: kshiftlw $1, %k1, %k1
+; KNL-NEXT: kshiftrw $1, %k1, %k1
+; KNL-NEXT: kshiftlw $7, %k0, %k0
+; KNL-NEXT: korw %k0, %k1, %k1
 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT: vpmovqw %zmm0, %xmm0
 ; KNL-NEXT: retq
@@ -634,13 +652,18 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; SKX: ## BB#0:
 ; SKX-NEXT: kmovb %edi, %k0
 ; SKX-NEXT: kmovw %esi, %k1
-; SKX-NEXT: kshiftlw $6, %k1, %k2
+; SKX-NEXT: kshiftlw $7, %k1, %k2
 ; SKX-NEXT: kshiftrw $15, %k2, %k2
-; SKX-NEXT: kshiftlw $7, %k1, %k1
+; SKX-NEXT: kshiftlw $6, %k1, %k1
 ; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kshiftlb $7, %k1, %k1
-; SKX-NEXT: kshiftlb $6, %k2, %k2
-; SKX-NEXT: korb %k2, %k0, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm0
+; SKX-NEXT: vpmovm2q %k1, %zmm1
+; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
+; SKX-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovq2m %zmm0, %k0
+; SKX-NEXT: kshiftlb $1, %k0, %k0
+; SKX-NEXT: kshiftrb $1, %k0, %k0
+; SKX-NEXT: kshiftlb $7, %k2, %k1
 ; SKX-NEXT: korb %k1, %k0, %k0
 ; SKX-NEXT: vpmovm2w %k0, %xmm0
 ; SKX-NEXT: retq
3 changes: 1 addition & 2 deletions llvm/test/CodeGen/X86/avx512-skx-insert-subvec.ll
@@ -33,8 +33,7 @@ define <8 x i1> @test2(<2 x i1> %a) {
 ; CHECK-NEXT: vpmovm2q %k0, %zmm0
 ; CHECK-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
-; CHECK-NEXT: vpsllq $63, %zmm0, %zmm0
-; CHECK-NEXT: vptestmq %zmm0, %zmm0, %k0
+; CHECK-NEXT: vpmovq2m %zmm0, %k0
 ; CHECK-NEXT: vpmovm2w %k0, %xmm0
 ; CHECK-NEXT: retq
 %res = shufflevector <2 x i1> %a, <2 x i1> zeroinitializer, <8 x i32> <i32 3, i32 3, i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef>
41 changes: 13 additions & 28 deletions llvm/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -16,8 +16,7 @@ define <2 x i1> @shuf2i1_1_0(<2 x i1> %a) {
 ; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
 ; VL_BW_DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
-; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
 ; VL_BW_DQ-NEXT: retq
 %b = shufflevector <2 x i1> %a, <2 x i1> undef, <2 x i32> <i32 1, i32 0>
@@ -41,8 +40,7 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
 ; VL_BW_DQ-NEXT: kmovb %eax, %k0
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm1
 ; VL_BW_DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
-; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
 ; VL_BW_DQ-NEXT: retq
 %b = shufflevector <2 x i1> %a, <2 x i1> <i1 1, i1 0>, <2 x i32> <i32 1, i32 2>
@@ -62,8 +60,7 @@ define <4 x i1> @shuf4i1_3_2_10(<4 x i1> %a) {
 ; VL_BW_DQ-NEXT: vptestmd %xmm0, %xmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2d %k0, %xmm0
 ; VL_BW_DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; VL_BW_DQ-NEXT: vpslld $31, %xmm0, %xmm0
-; VL_BW_DQ-NEXT: vptestmd %xmm0, %xmm0, %k0
+; VL_BW_DQ-NEXT: vpmovd2m %xmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2d %k0, %xmm0
 ; VL_BW_DQ-NEXT: retq
 %b = shufflevector <4 x i1> %a, <4 x i1> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -90,8 +87,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
 ; VL_BW_DQ-NEXT: vpermq %zmm0, %zmm1, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0
 ; VL_BW_DQ-NEXT: retq
 %a2 = icmp eq <8 x i64> %a, %a1
@@ -124,8 +120,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <1
 ; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm1
 ; VL_BW_DQ-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
 ; VL_BW_DQ-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
-; VL_BW_DQ-NEXT: vpslld $31, %zmm1, %zmm0
-; VL_BW_DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovd2m %zmm1, %k0
 ; VL_BW_DQ-NEXT: vpmovm2b %k0, %xmm0
 ; VL_BW_DQ-NEXT: retq
 %a2 = icmp eq <16 x i32> %a, %a1
@@ -151,7 +146,6 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0
 ; VL_BW_DQ-NEXT: vpmovm2w %k0, %zmm0
 ; VL_BW_DQ-NEXT: vmovdqu16 {{.*#+}} zmm1 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0,3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
 ; VL_BW_DQ-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; VL_BW_DQ-NEXT: vpsllw $15, %zmm0, %zmm0
 ; VL_BW_DQ-NEXT: vpmovw2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2b %k0, %ymm0
 ; VL_BW_DQ-NEXT: retq
@@ -179,8 +173,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT: vextracti64x2 $1, %zmm0, %xmm0
 ; VL_BW_DQ-NEXT: vpbroadcastq %xmm0, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0
 ; VL_BW_DQ-NEXT: retq
 %b = bitcast i8 %a to <8 x i1>
@@ -209,8 +202,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
 ; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u>
 ; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: kmovb %k0, %eax
 ; VL_BW_DQ-NEXT: retq
 %b = bitcast i8 %a to <8 x i1>
@@ -236,8 +228,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
 ; VL_BW_DQ-NEXT: kmovb %edi, %k0
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: kmovb %k0, %eax
 ; VL_BW_DQ-NEXT: retq
 %b = bitcast i8 %a to <8 x i1>
@@ -267,8 +258,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
 ; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0]
 ; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: kmovb %k0, %eax
 ; VL_BW_DQ-NEXT: retq
 %b = bitcast i8 %a to <8 x i1>
@@ -298,8 +288,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
 ; VL_BW_DQ-NEXT: vpxord %zmm2, %zmm2, %zmm2
 ; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
 ; VL_BW_DQ-NEXT: kmovb %k0, %eax
 ; VL_BW_DQ-NEXT: retq
 %b = bitcast i8 %a to <8 x i1>
@@ -333,8 +322,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm1
 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,6,1,0,3,7,7,1]
 ; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: kmovb %k0, %eax
 ; VL_BW_DQ-NEXT: retq
 %b = bitcast i8 %a to <8 x i1>
@@ -366,8 +354,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
 ; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,3,4,5,6,7]
 ; VL_BW_DQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
 ; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
 ; VL_BW_DQ-NEXT: kmovb %k0, %eax
 ; VL_BW_DQ-NEXT: retq
 %c = shufflevector <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1> %a, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 0>
@@ -393,8 +380,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
 ; VL_BW_DQ-NEXT: kmovw %edi, %k0
 ; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm0
 ; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
-; VL_BW_DQ-NEXT: vpslld $31, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: kmovw %k0, %eax
 ; VL_BW_DQ-NEXT: retq
 %b = bitcast i16 %a to <16 x i1>
@@ -444,7 +430,6 @@ define i64 @shuf64i1_zero(i64 %a) {
 ; VL_BW_DQ-NEXT: kmovq %rdi, %k0
 ; VL_BW_DQ-NEXT: vpmovm2b %k0, %zmm0
 ; VL_BW_DQ-NEXT: vpbroadcastb %xmm0, %zmm0
-; VL_BW_DQ-NEXT: vpsllw $7, %zmm0, %zmm0
 ; VL_BW_DQ-NEXT: vpmovb2m %zmm0, %k0
 ; VL_BW_DQ-NEXT: kmovq %k0, %rax
 ; VL_BW_DQ-NEXT: retq
