Index: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -702,131 +702,7 @@ std::pair<SDValue, SDValue> VectorLegalizer::ExpandLoad(SDNode *N) { LoadSDNode *LD = cast<LoadSDNode>(N); - - EVT SrcVT = LD->getMemoryVT(); - EVT SrcEltVT = SrcVT.getScalarType(); - unsigned NumElem = SrcVT.getVectorNumElements(); - - SDValue NewChain; - SDValue Value; - if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) { - SDLoc dl(N); - - SmallVector<SDValue, 8> Vals; - SmallVector<SDValue, 8> LoadChains; - - EVT DstEltVT = LD->getValueType(0).getScalarType(); - SDValue Chain = LD->getChain(); - SDValue BasePTR = LD->getBasePtr(); - ISD::LoadExtType ExtType = LD->getExtensionType(); - - // When elements in a vector is not byte-addressable, we cannot directly - // load each element by advancing pointer, which could only address bytes. - // Instead, we load all significant words, mask bits off, and concatenate - // them to form each element. Finally, they are extended to destination - // scalar type to build the destination vector. - EVT WideVT = TLI.getPointerTy(DAG.getDataLayout()); - - assert(WideVT.isRound() && - "Could not handle the sophisticated case when the widest integer is" - " not power of 2."); - assert(WideVT.bitsGE(SrcEltVT) && - "Type is not legalized?"); - - unsigned WideBytes = WideVT.getStoreSize(); - unsigned Offset = 0; - unsigned RemainingBytes = SrcVT.getStoreSize(); - SmallVector<SDValue, 8> LoadVals; - while (RemainingBytes > 0) { - SDValue ScalarLoad; - unsigned LoadBytes = WideBytes; - - if (RemainingBytes >= LoadBytes) { - ScalarLoad = DAG.getLoad( - WideVT, dl, Chain, BasePTR, - LD->getPointerInfo().getWithOffset(Offset), LD->getOriginalAlign(), - LD->getMemOperand()->getFlags(), LD->getAAInfo()); - } else { - EVT LoadVT = WideVT; - while (RemainingBytes < LoadBytes) { - LoadBytes >>= 1; // Reduce the load size by half. - LoadVT = EVT::getIntegerVT(*DAG.getContext(), LoadBytes << 3); - } - ScalarLoad = - DAG.getExtLoad(ISD::EXTLOAD, dl, WideVT, Chain, BasePTR, - LD->getPointerInfo().getWithOffset(Offset), LoadVT, - LD->getOriginalAlign(), - LD->getMemOperand()->getFlags(), LD->getAAInfo()); - } - - RemainingBytes -= LoadBytes; - Offset += LoadBytes; - - BasePTR = DAG.getObjectPtrOffset(dl, BasePTR, LoadBytes); - - LoadVals.push_back(ScalarLoad.getValue(0)); - LoadChains.push_back(ScalarLoad.getValue(1)); - } - - unsigned BitOffset = 0; - unsigned WideIdx = 0; - unsigned WideBits = WideVT.getSizeInBits(); - - // Extract bits, pack and extend/trunc them into destination type.
- unsigned SrcEltBits = SrcEltVT.getSizeInBits(); - SDValue SrcEltBitMask = DAG.getConstant( - APInt::getLowBitsSet(WideBits, SrcEltBits), dl, WideVT); - - for (unsigned Idx = 0; Idx != NumElem; ++Idx) { - assert(BitOffset < WideBits && "Unexpected offset!"); - - SDValue ShAmt = DAG.getConstant( - BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout())); - SDValue Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt); - - BitOffset += SrcEltBits; - if (BitOffset >= WideBits) { - WideIdx++; - BitOffset -= WideBits; - if (BitOffset > 0) { - ShAmt = DAG.getConstant( - SrcEltBits - BitOffset, dl, - TLI.getShiftAmountTy(WideVT, DAG.getDataLayout())); - SDValue Hi = - DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt); - Lo = DAG.getNode(ISD::OR, dl, WideVT, Lo, Hi); - } - } - - Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask); - - switch (ExtType) { - default: llvm_unreachable("Unknown extended-load op!"); - case ISD::EXTLOAD: - Lo = DAG.getAnyExtOrTrunc(Lo, dl, DstEltVT); - break; - case ISD::ZEXTLOAD: - Lo = DAG.getZExtOrTrunc(Lo, dl, DstEltVT); - break; - case ISD::SEXTLOAD: - ShAmt = - DAG.getConstant(WideBits - SrcEltBits, dl, - TLI.getShiftAmountTy(WideVT, DAG.getDataLayout())); - Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt); - Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt); - Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT); - break; - } - Vals.push_back(Lo); - } - - NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); - Value = DAG.getBuildVector(N->getValueType(0), dl, Vals); - } else { - std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG); - } - - return std::make_pair(Value, NewChain); + return TLI.scalarizeVectorLoad(LD, DAG); } SDValue VectorLegalizer::ExpandStore(SDNode *N) { Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -6599,6 +6599,9 @@ LD->getMemOperand()->getFlags(), LD->getAAInfo()); + SDValue SrcEltBitMask = DAG.getConstant( + APInt::getLowBitsSet(NumBits, SrcEltVT.getSizeInBits()), SL, IntVT); + SmallVector<SDValue, 8> Vals; for (unsigned Idx = 0; Idx < NumElem; ++Idx) { unsigned ShiftIntoIdx = @@ -6607,7 +6610,8 @@ DAG.getConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), SL, IntVT); SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, IntVT, Load, ShiftAmount); - SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, ShiftedElt); + SDValue Lo = DAG.getNode(ISD::AND, SL, IntVT, ShiftedElt, SrcEltBitMask); + SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Lo); if (ExtType != ISD::NON_EXTLOAD) { unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); Index: llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll =================================================================== --- llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll +++ llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll @@ -611,11 +611,12 @@ define void @load_v3i1_broadcast_1_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) { ; AVX512-LABEL: load_v3i1_broadcast_1_v1i1_store: ; AVX512: # %bb.0: -; AVX512-NEXT: movzbl (%rdi), %eax +; AVX512-NEXT: movb (%rdi), %al +; AVX512-NEXT: shrb %al ; AVX512-NEXT: xorl %ecx, %ecx -; AVX512-NEXT: btl $1, %eax +; AVX512-NEXT: testb $1, %al ; AVX512-NEXT: movl $255, %eax -; AVX512-NEXT: cmovael %ecx, %eax +; AVX512-NEXT: cmovel %ecx, %eax ; AVX512-NEXT: 
kmovd %eax, %k0 ; AVX512-NEXT: kshiftrb $1, %k0, %k0 ; AVX512-NEXT: kmovb %k0, (%rsi) @@ -623,11 +624,12 @@ ; ; AVX512NOTDQ-LABEL: load_v3i1_broadcast_1_v1i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: movb (%rdi), %al +; AVX512NOTDQ-NEXT: shrb %al ; AVX512NOTDQ-NEXT: xorl %ecx, %ecx -; AVX512NOTDQ-NEXT: btl $1, %eax +; AVX512NOTDQ-NEXT: testb $1, %al ; AVX512NOTDQ-NEXT: movl $255, %eax -; AVX512NOTDQ-NEXT: cmovael %ecx, %eax +; AVX512NOTDQ-NEXT: cmovel %ecx, %eax ; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $1, %k0, %k0 ; AVX512NOTDQ-NEXT: kmovd %k0, %eax Index: llvm/test/CodeGen/X86/bitcast-vector-bool.ll =================================================================== --- llvm/test/CodeGen/X86/bitcast-vector-bool.ll +++ llvm/test/CodeGen/X86/bitcast-vector-bool.ll @@ -49,11 +49,12 @@ ; SSE2-SSSE3: # %bb.0: ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax ; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: andl $3, %ecx -; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 -; SSE2-SSSE3-NEXT: shrl $2, %eax -; SSE2-SSSE3-NEXT: movq %rax, %xmm1 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: shrb $2, %cl +; SSE2-SSSE3-NEXT: movzbl %cl, %ecx +; SSE2-SSSE3-NEXT: andb $3, %al +; SSE2-SSSE3-NEXT: movzbl %al, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: pinsrw $4, %ecx, %xmm0 ; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al @@ -61,10 +62,10 @@ ; ; AVX12-LABEL: bitcast_v4i32_to_v2i2: ; AVX12: # %bb.0: -; AVX12-NEXT: vmovmskps %xmm0, %ecx -; AVX12-NEXT: movl %ecx, %eax -; AVX12-NEXT: shrl $2, %eax -; AVX12-NEXT: andl $3, %ecx +; AVX12-NEXT: vmovmskps %xmm0, %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrb $2, %cl +; AVX12-NEXT: andb $3, %al ; AVX12-NEXT: addb %cl, %al ; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq @@ -73,11 +74,10 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k0 -; AVX512-NEXT: kmovd %k0, %ecx -; AVX512-NEXT: movzbl %cl, %eax -; AVX512-NEXT: shrl $2, %eax -; AVX512-NEXT: andl $3, %eax -; AVX512-NEXT: andl $3, %ecx +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: shrb $2, %cl +; AVX512-NEXT: andb $3, %al ; AVX512-NEXT: addb %cl, %al ; AVX512-NEXT: # kill: def $al killed $al killed $eax ; AVX512-NEXT: retq @@ -94,13 +94,14 @@ ; SSE2-SSSE3: # %bb.0: ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: movzbl %al, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 -; SSE2-SSSE3-NEXT: andl $15, %eax -; SSE2-SSSE3-NEXT: movq %rax, %xmm1 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrb $4, %cl +; SSE2-SSSE3-NEXT: movzbl %cl, %ecx +; SSE2-SSSE3-NEXT: andb $15, %al +; SSE2-SSSE3-NEXT: movzbl %al, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: retq @@ -108,10 +109,10 @@ ; AVX12-LABEL: bitcast_v8i16_to_v2i4: ; AVX12: # %bb.0: ; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 -; AVX12-NEXT: vpmovmskb %xmm0, %ecx -; AVX12-NEXT: movzbl %cl, %eax -; AVX12-NEXT: shrl $4, %eax -; AVX12-NEXT: andl 
$15, %ecx +; AVX12-NEXT: vpmovmskb %xmm0, %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrb $4, %cl +; AVX12-NEXT: andb $15, %al ; AVX12-NEXT: addb %cl, %al ; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq @@ -119,10 +120,10 @@ ; AVX512-LABEL: bitcast_v8i16_to_v2i4: ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovw2m %xmm0, %k0 -; AVX512-NEXT: kmovd %k0, %ecx -; AVX512-NEXT: movzbl %cl, %eax -; AVX512-NEXT: shrl $4, %eax -; AVX512-NEXT: andl $15, %ecx +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: shrb $4, %cl +; AVX512-NEXT: andb $15, %al ; AVX512-NEXT: addb %cl, %al ; AVX512-NEXT: # kill: def $al killed $al killed $eax ; AVX512-NEXT: retq @@ -181,22 +182,23 @@ ; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax ; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx -; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 -; SSE2-SSSE3-NEXT: andl $3, %eax -; SSE2-SSSE3-NEXT: movq %rax, %xmm1 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: shrb $2, %cl +; SSE2-SSSE3-NEXT: movzbl %cl, %ecx +; SSE2-SSSE3-NEXT: andb $3, %al +; SSE2-SSSE3-NEXT: movzbl %al, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: bitcast_v4i64_to_v2i2: ; AVX12: # %bb.0: -; AVX12-NEXT: vmovmskpd %ymm0, %ecx -; AVX12-NEXT: movl %ecx, %eax -; AVX12-NEXT: shrl $2, %eax -; AVX12-NEXT: andl $3, %ecx +; AVX12-NEXT: vmovmskpd %ymm0, %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrb $2, %cl +; AVX12-NEXT: andb $3, %al ; AVX12-NEXT: addb %cl, %al ; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: vzeroupper @@ -206,11 +208,10 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512-NEXT: vpcmpgtq %ymm0, %ymm1, %k0 -; AVX512-NEXT: kmovd %k0, %ecx -; AVX512-NEXT: movzbl %cl, %eax -; AVX512-NEXT: shrl $2, %eax -; AVX512-NEXT: andl $3, %eax -; AVX512-NEXT: andl $3, %ecx +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: shrb $2, %cl +; AVX512-NEXT: andb $3, %al ; AVX512-NEXT: addb %cl, %al ; AVX512-NEXT: # kill: def $al killed $al killed $eax ; AVX512-NEXT: vzeroupper @@ -229,23 +230,24 @@ ; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: movzbl %al, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 -; SSE2-SSSE3-NEXT: andl $15, %eax -; SSE2-SSSE3-NEXT: movq %rax, %xmm1 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrb $4, %cl +; SSE2-SSSE3-NEXT: movzbl %cl, %ecx +; SSE2-SSSE3-NEXT: andb $15, %al +; SSE2-SSSE3-NEXT: movzbl %al, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: bitcast_v8i32_to_v2i4: ; AVX12: # %bb.0: -; AVX12-NEXT: vmovmskps %ymm0, %ecx -; AVX12-NEXT: movl %ecx, %eax -; AVX12-NEXT: shrl $4, %eax -; AVX12-NEXT: andl $15, %ecx +; AVX12-NEXT: vmovmskps %ymm0, %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrb $4, 
%cl +; AVX12-NEXT: andb $15, %al ; AVX12-NEXT: addb %cl, %al ; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: vzeroupper @@ -255,10 +257,10 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512-NEXT: vpcmpgtd %ymm0, %ymm1, %k0 -; AVX512-NEXT: kmovd %k0, %ecx -; AVX512-NEXT: movzbl %cl, %eax -; AVX512-NEXT: shrl $4, %eax -; AVX512-NEXT: andl $15, %ecx +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: shrb $4, %cl +; AVX512-NEXT: andb $15, %al ; AVX512-NEXT: addb %cl, %al ; AVX512-NEXT: # kill: def $al killed $al killed $eax ; AVX512-NEXT: vzeroupper @@ -391,13 +393,14 @@ ; SSE2-SSSE3-NEXT: packssdw %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: movzbl %al, %ecx -; SSE2-SSSE3-NEXT: shrl $4, %ecx -; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 -; SSE2-SSSE3-NEXT: andl $15, %eax -; SSE2-SSSE3-NEXT: movq %rax, %xmm1 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrb $4, %cl +; SSE2-SSSE3-NEXT: movzbl %cl, %ecx +; SSE2-SSSE3-NEXT: andb $15, %al +; SSE2-SSSE3-NEXT: movzbl %al, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: retq @@ -412,10 +415,10 @@ ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovmskps %ymm0, %ecx -; AVX1-NEXT: movl %ecx, %eax -; AVX1-NEXT: shrl $4, %eax -; AVX1-NEXT: andl $15, %ecx +; AVX1-NEXT: vmovmskps %ymm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrb $4, %cl +; AVX1-NEXT: andb $15, %al ; AVX1-NEXT: addb %cl, %al ; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper @@ -425,10 +428,10 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX2-NEXT: vmovmskps %ymm0, %ecx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: shrl $4, %eax -; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vmovmskps %ymm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrb $4, %cl +; AVX2-NEXT: andb $15, %al ; AVX2-NEXT: addb %cl, %al ; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: vzeroupper @@ -438,10 +441,10 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512-NEXT: vpcmpgtq %zmm0, %zmm1, %k0 -; AVX512-NEXT: kmovd %k0, %ecx -; AVX512-NEXT: movzbl %cl, %eax -; AVX512-NEXT: shrl $4, %eax -; AVX512-NEXT: andl $15, %ecx +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: shrb $4, %cl +; AVX512-NEXT: andb $15, %al ; AVX512-NEXT: addb %cl, %al ; AVX512-NEXT: # kill: def $al killed $al killed $eax ; AVX512-NEXT: vzeroupper Index: llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll =================================================================== --- llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll +++ llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll @@ -542,7 +542,7 @@ define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind { ; SSE2-LABEL: _clearupper4xi64b: ; SSE2: # %bb.0: -; SSE2-NEXT: movaps {{.*#+}} xmm2 +; SSE2-NEXT: movaps {{.*#+}} xmm2 = [NaN,0.0E+0,NaN,0.0E+0] ; SSE2-NEXT: andps %xmm2, %xmm0 ; SSE2-NEXT: andps %xmm2, %xmm1 ; SSE2-NEXT: retq @@ -805,48 +805,48 @@ ; AVX-NEXT: pushq 
%rbx ; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; AVX-NEXT: movq -{{[0-9]+}}(%rsp), %r9 -; AVX-NEXT: movq -{{[0-9]+}}(%rsp), %rdx +; AVX-NEXT: movq -{{[0-9]+}}(%rsp), %rcx ; AVX-NEXT: movq %r9, %r8 ; AVX-NEXT: shrq $56, %r8 ; AVX-NEXT: andl $15, %r8d ; AVX-NEXT: movq %r9, %r10 ; AVX-NEXT: shrq $48, %r10 ; AVX-NEXT: andl $15, %r10d -; AVX-NEXT: movq %r9, %rsi -; AVX-NEXT: shrq $40, %rsi -; AVX-NEXT: andl $15, %esi +; AVX-NEXT: movq %rcx, %rdx +; AVX-NEXT: shldq $24, %r9, %rdx +; AVX-NEXT: andl $15, %edx ; AVX-NEXT: movq %r9, %r11 ; AVX-NEXT: shrq $32, %r11 ; AVX-NEXT: andl $15, %r11d -; AVX-NEXT: movq %rdx, %rdi +; AVX-NEXT: movq %rcx, %rdi ; AVX-NEXT: shrq $56, %rdi ; AVX-NEXT: andl $15, %edi -; AVX-NEXT: movq %rdx, %rax -; AVX-NEXT: shrq $48, %rax +; AVX-NEXT: movq %rcx, %rsi +; AVX-NEXT: shrq $48, %rsi +; AVX-NEXT: andl $15, %esi +; AVX-NEXT: movq %rcx, %rax +; AVX-NEXT: shrq $40, %rax ; AVX-NEXT: andl $15, %eax -; AVX-NEXT: movq %rdx, %rcx -; AVX-NEXT: shrq $40, %rcx -; AVX-NEXT: andl $15, %ecx -; AVX-NEXT: movq %rdx, %rbx +; AVX-NEXT: movq %rcx, %rbx ; AVX-NEXT: shrq $32, %rbx ; AVX-NEXT: andl $15, %ebx ; AVX-NEXT: shlq $32, %rbx -; AVX-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F -; AVX-NEXT: orq %rbx, %rdx -; AVX-NEXT: shlq $40, %rcx -; AVX-NEXT: orq %rdx, %rcx -; AVX-NEXT: shlq $48, %rax +; AVX-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F +; AVX-NEXT: orq %rbx, %rcx +; AVX-NEXT: shlq $40, %rax ; AVX-NEXT: orq %rcx, %rax +; AVX-NEXT: shlq $48, %rsi +; AVX-NEXT: orq %rax, %rsi ; AVX-NEXT: shlq $56, %rdi -; AVX-NEXT: orq %rax, %rdi +; AVX-NEXT: orq %rsi, %rdi ; AVX-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) ; AVX-NEXT: shlq $32, %r11 ; AVX-NEXT: andl $252645135, %r9d # imm = 0xF0F0F0F ; AVX-NEXT: orq %r11, %r9 -; AVX-NEXT: shlq $40, %rsi -; AVX-NEXT: orq %r9, %rsi +; AVX-NEXT: shlq $40, %rdx +; AVX-NEXT: orq %r9, %rdx ; AVX-NEXT: shlq $48, %r10 -; AVX-NEXT: orq %rsi, %r10 +; AVX-NEXT: orq %rdx, %r10 ; AVX-NEXT: shlq $56, %r8 ; AVX-NEXT: orq %r10, %r8 ; AVX-NEXT: movq %r8, -{{[0-9]+}}(%rsp) @@ -986,96 +986,96 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: movq %rax, %r8 ; AVX1-NEXT: movq %rax, %rdx ; AVX1-NEXT: movq %rax, %rsi ; AVX1-NEXT: movq %rax, %rdi -; AVX1-NEXT: shrq $32, %rdi -; AVX1-NEXT: andl $15, %edi -; AVX1-NEXT: shlq $32, %rdi +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shrq $32, %rcx +; AVX1-NEXT: andl $15, %ecx +; AVX1-NEXT: shlq $32, %rcx ; AVX1-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F -; AVX1-NEXT: orq %rdi, %rax -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdi -; AVX1-NEXT: shrq $40, %rsi +; AVX1-NEXT: orq %rcx, %rax +; AVX1-NEXT: shrq $40, %rdi +; AVX1-NEXT: andl $15, %edi +; AVX1-NEXT: shlq $40, %rdi +; AVX1-NEXT: orq %rax, %rdi +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax +; AVX1-NEXT: shrq $48, %rsi ; AVX1-NEXT: andl $15, %esi -; AVX1-NEXT: shlq $40, %rsi -; AVX1-NEXT: orq %rax, %rsi -; AVX1-NEXT: movq %rdi, %rax -; AVX1-NEXT: shrq $48, %rdx +; AVX1-NEXT: shlq $48, %rsi +; AVX1-NEXT: orq %rdi, %rsi +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shrq $56, %rdx ; AVX1-NEXT: andl $15, %edx -; AVX1-NEXT: shlq $48, %rdx +; AVX1-NEXT: shlq $56, %rdx ; AVX1-NEXT: orq %rsi, %rdx -; AVX1-NEXT: movq %rdi, %rsi +; AVX1-NEXT: movq %rax, %rsi +; AVX1-NEXT: shldq $24, %rax, %r8 +; AVX1-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movq %rax, %rdx +; AVX1-NEXT: shrq $32, %rdx +; AVX1-NEXT: andl $15, %edx +; AVX1-NEXT: shlq $32, %rdx +; AVX1-NEXT: andl $252645135, %eax # imm = 
0xF0F0F0F +; AVX1-NEXT: orq %rdx, %rax +; AVX1-NEXT: andl $15, %r8d +; AVX1-NEXT: shlq $40, %r8 +; AVX1-NEXT: orq %rax, %r8 +; AVX1-NEXT: shrq $48, %rsi +; AVX1-NEXT: andl $15, %esi +; AVX1-NEXT: shlq $48, %rsi +; AVX1-NEXT: orq %r8, %rsi ; AVX1-NEXT: shrq $56, %rcx ; AVX1-NEXT: andl $15, %ecx +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 ; AVX1-NEXT: shlq $56, %rcx -; AVX1-NEXT: orq %rdx, %rcx -; AVX1-NEXT: movq %rdi, %rdx +; AVX1-NEXT: orq %rsi, %rcx +; AVX1-NEXT: vmovq %xmm0, %rax ; AVX1-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: movq %rdi, %rcx +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $8, %ecx +; AVX1-NEXT: vmovd %eax, %xmm1 +; AVX1-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $16, %ecx +; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $24, %ecx +; AVX1-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movq %rax, %rcx ; AVX1-NEXT: shrq $32, %rcx -; AVX1-NEXT: andl $15, %ecx -; AVX1-NEXT: shlq $32, %rcx -; AVX1-NEXT: andl $252645135, %edi # imm = 0xF0F0F0F -; AVX1-NEXT: orq %rcx, %rdi -; AVX1-NEXT: shrq $40, %rdx -; AVX1-NEXT: andl $15, %edx -; AVX1-NEXT: shlq $40, %rdx -; AVX1-NEXT: orq %rdi, %rdx -; AVX1-NEXT: shrq $48, %rsi -; AVX1-NEXT: andl $15, %esi -; AVX1-NEXT: shlq $48, %rsi -; AVX1-NEXT: orq %rdx, %rsi +; AVX1-NEXT: vpinsrb $4, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shrq $40, %rcx +; AVX1-NEXT: vpinsrb $5, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shrq $48, %rcx +; AVX1-NEXT: vpinsrb $6, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: vpextrq $1, %xmm0, %rcx ; AVX1-NEXT: shrq $56, %rax -; AVX1-NEXT: andl $15, %eax -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: shlq $56, %rax -; AVX1-NEXT: orq %rsi, %rax -; AVX1-NEXT: vmovq %xmm0, %rcx -; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0 ; AVX1-NEXT: movl %ecx, %eax ; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: vmovd %ecx, %xmm1 -; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 ; AVX1-NEXT: movl %ecx, %eax ; AVX1-NEXT: shrl $16, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ; AVX1-NEXT: movl %ecx, %eax ; AVX1-NEXT: shrl $24, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 ; AVX1-NEXT: movq %rcx, %rax ; AVX1-NEXT: shrq $32, %rax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ; AVX1-NEXT: movq %rcx, %rax ; AVX1-NEXT: shrq $40, %rax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 ; AVX1-NEXT: movq %rcx, %rax ; AVX1-NEXT: shrq $48, %rax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ; AVX1-NEXT: shrq $56, %rcx -; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 -; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $24, %ecx -; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq $32, %rcx -; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq $40, %rcx -; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 -; 
AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq $48, %rcx -; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: shrq $56, %rax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0 ; AVX1-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq @@ -1084,96 +1084,96 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: movq %rax, %r8 ; AVX2-NEXT: movq %rax, %rdx ; AVX2-NEXT: movq %rax, %rsi ; AVX2-NEXT: movq %rax, %rdi -; AVX2-NEXT: shrq $32, %rdi -; AVX2-NEXT: andl $15, %edi -; AVX2-NEXT: shlq $32, %rdi +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shrq $32, %rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: shlq $32, %rcx ; AVX2-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F -; AVX2-NEXT: orq %rdi, %rax -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rdi -; AVX2-NEXT: shrq $40, %rsi +; AVX2-NEXT: orq %rcx, %rax +; AVX2-NEXT: shrq $40, %rdi +; AVX2-NEXT: andl $15, %edi +; AVX2-NEXT: shlq $40, %rdi +; AVX2-NEXT: orq %rax, %rdi +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax +; AVX2-NEXT: shrq $48, %rsi ; AVX2-NEXT: andl $15, %esi -; AVX2-NEXT: shlq $40, %rsi -; AVX2-NEXT: orq %rax, %rsi -; AVX2-NEXT: movq %rdi, %rax -; AVX2-NEXT: shrq $48, %rdx +; AVX2-NEXT: shlq $48, %rsi +; AVX2-NEXT: orq %rdi, %rsi +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shrq $56, %rdx ; AVX2-NEXT: andl $15, %edx -; AVX2-NEXT: shlq $48, %rdx +; AVX2-NEXT: shlq $56, %rdx ; AVX2-NEXT: orq %rsi, %rdx -; AVX2-NEXT: movq %rdi, %rsi +; AVX2-NEXT: movq %rax, %rsi +; AVX2-NEXT: shldq $24, %rax, %r8 +; AVX2-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movq %rax, %rdx +; AVX2-NEXT: shrq $32, %rdx +; AVX2-NEXT: andl $15, %edx +; AVX2-NEXT: shlq $32, %rdx +; AVX2-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F +; AVX2-NEXT: orq %rdx, %rax +; AVX2-NEXT: andl $15, %r8d +; AVX2-NEXT: shlq $40, %r8 +; AVX2-NEXT: orq %rax, %r8 +; AVX2-NEXT: shrq $48, %rsi +; AVX2-NEXT: andl $15, %esi +; AVX2-NEXT: shlq $48, %rsi +; AVX2-NEXT: orq %r8, %rsi ; AVX2-NEXT: shrq $56, %rcx ; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX2-NEXT: shlq $56, %rcx -; AVX2-NEXT: orq %rdx, %rcx -; AVX2-NEXT: movq %rdi, %rdx +; AVX2-NEXT: orq %rsi, %rcx +; AVX2-NEXT: vmovq %xmm0, %rax ; AVX2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movq %rdi, %rcx +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $8, %ecx +; AVX2-NEXT: vmovd %eax, %xmm1 +; AVX2-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $16, %ecx +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $24, %ecx +; AVX2-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movq %rax, %rcx ; AVX2-NEXT: shrq $32, %rcx -; AVX2-NEXT: andl $15, %ecx -; AVX2-NEXT: shlq $32, %rcx -; AVX2-NEXT: andl $252645135, %edi # imm = 0xF0F0F0F -; AVX2-NEXT: orq %rcx, %rdi -; AVX2-NEXT: shrq $40, %rdx -; AVX2-NEXT: andl $15, %edx -; AVX2-NEXT: shlq $40, %rdx -; AVX2-NEXT: orq %rdi, %rdx -; AVX2-NEXT: shrq $48, %rsi -; AVX2-NEXT: andl $15, %esi -; AVX2-NEXT: shlq $48, %rsi -; AVX2-NEXT: orq %rdx, %rsi +; AVX2-NEXT: vpinsrb $4, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shrq $40, %rcx +; AVX2-NEXT: vpinsrb $5, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shrq $48, %rcx +; AVX2-NEXT: vpinsrb $6, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm0, %rcx ; AVX2-NEXT: shrq $56, %rax -; AVX2-NEXT: andl $15, %eax -; 
AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: shlq $56, %rax -; AVX2-NEXT: orq %rsi, %rax -; AVX2-NEXT: vmovq %xmm0, %rcx -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0 ; AVX2-NEXT: movl %ecx, %eax ; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: vmovd %ecx, %xmm1 -; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 ; AVX2-NEXT: movl %ecx, %eax ; AVX2-NEXT: shrl $16, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ; AVX2-NEXT: movl %ecx, %eax ; AVX2-NEXT: shrl $24, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 ; AVX2-NEXT: movq %rcx, %rax ; AVX2-NEXT: shrq $32, %rax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ; AVX2-NEXT: movq %rcx, %rax ; AVX2-NEXT: shrq $40, %rax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 ; AVX2-NEXT: movq %rcx, %rax ; AVX2-NEXT: shrq $48, %rax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ; AVX2-NEXT: shrq $56, %rcx -; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 -; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $16, %ecx -; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $24, %ecx -; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq $32, %rcx -; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq $40, %rcx -; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq $48, %rcx -; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: shrq $56, %rax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0 ; AVX2-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm1 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: retq Index: llvm/test/CodeGen/X86/load-local-v3i1.ll =================================================================== --- llvm/test/CodeGen/X86/load-local-v3i1.ll +++ llvm/test/CodeGen/X86/load-local-v3i1.ll @@ -98,20 +98,23 @@ ; CHECK-NEXT: movq %rdi, %r14 ; CHECK-NEXT: movb (%rdx), %al ; CHECK-NEXT: movl %eax, %ecx -; CHECK-NEXT: shrb $2, %cl -; CHECK-NEXT: movzbl %al, %r15d -; CHECK-NEXT: shrb %al -; CHECK-NEXT: movzbl %al, %ebx -; CHECK-NEXT: movzbl %cl, %ebp +; CHECK-NEXT: shrb %cl +; CHECK-NEXT: andb $1, %cl +; CHECK-NEXT: movl %eax, %edx +; CHECK-NEXT: shrb $2, %dl +; CHECK-NEXT: andb $1, %al +; CHECK-NEXT: movzbl %al, %ebp +; CHECK-NEXT: movzbl %dl, %r15d +; CHECK-NEXT: movzbl %cl, %ebx ; CHECK-NEXT: movq %rsi, %rdi -; CHECK-NEXT: movl %r15d, %esi +; CHECK-NEXT: movl %ebp, %esi ; CHECK-NEXT: movl %ebx, %edx -; CHECK-NEXT: movl %ebp, %ecx +; CHECK-NEXT: movl %r15d, %ecx ; CHECK-NEXT: callq masked_load_v3 ; CHECK-NEXT: movq %r14, %rdi -; CHECK-NEXT: movl %r15d, %esi +; CHECK-NEXT: movl %ebp, %esi ; CHECK-NEXT: movl %ebx, %edx -; CHECK-NEXT: movl %ebp, %ecx +; CHECK-NEXT: movl %r15d, %ecx ; CHECK-NEXT: callq masked_store4_v3 ; CHECK-NEXT: addq $8, %rsp ; CHECK-NEXT: popq %rbx Index: llvm/test/CodeGen/X86/load-local-v4i5.ll =================================================================== --- /dev/null +++ 
llvm/test/CodeGen/X86/load-local-v4i5.ll @@ -0,0 +1,77 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s +@0 = internal unnamed_addr constant [4 x i5] [i5 2, i5 0, i5 2, i5 -1], align 1 + +; Function Attrs: nobuiltin nounwind +define void @_start() { +; CHECK-LABEL: _start: +; CHECK: # %bb.0: # %Entry +; CHECK-NEXT: movl {{.*}}(%rip), %eax +; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi +; CHECK-NEXT: andl $31, %eax +; CHECK-NEXT: andl $31, %esi +; CHECK-NEXT: shll $5, %esi +; CHECK-NEXT: orl %eax, %esi +; CHECK-NEXT: andl $31, %edx +; CHECK-NEXT: shll $10, %edx +; CHECK-NEXT: orl %esi, %edx +; CHECK-NEXT: movzbl %cl, %eax +; CHECK-NEXT: movl %eax, %ecx +; CHECK-NEXT: shll $15, %ecx +; CHECK-NEXT: orl %edx, %ecx +; CHECK-NEXT: movw %cx, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: shrl $16, %ecx +; CHECK-NEXT: andl $15, %ecx +; CHECK-NEXT: movb %cl, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: cmpb $31, %al +; CHECK-NEXT: je .LBB0_2 +; CHECK-NEXT: # %bb.1: # %Then +; CHECK-NEXT: int3 +; CHECK-NEXT: .LBB0_2: # %EndIf +; CHECK-NEXT: retq +Entry: + %x = alloca [4 x i5], align 1 + %y = alloca <4 x i5>, align 4 + %z = alloca i5, align 1 + %0 = bitcast [4 x i5]* %x to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 bitcast ([4 x i5]* @0 to i8*), i64 4, i1 false) + %1 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 0 + %2 = load i5, i5* %1 + %3 = insertelement <4 x i5> undef, i5 %2, i32 0 + %4 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 1 + %5 = load i5, i5* %4 + %6 = insertelement <4 x i5> %3, i5 %5, i32 1 + %7 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 2 + %8 = load i5, i5* %7 + %9 = insertelement <4 x i5> %6, i5 %8, i32 2 + %10 = getelementptr inbounds [4 x i5], [4 x i5]* %x, i64 0, i64 3 + %11 = load i5, i5* %10 + %12 = insertelement <4 x i5> %9, i5 %11, i32 3 + store <4 x i5> %12, <4 x i5>* %y, align 4 + %13 = load <4 x i5>, <4 x i5>* %y + %14 = extractelement <4 x i5> %13, i32 3 + store i5 %14, i5* %z, align 1 + %15 = load i5, i5* %z, align 1 + %16 = icmp ne i5 %15, -1 + br i1 %16, label %Then, label %Else + +Then: ; preds = %Entry + call void @llvm.debugtrap() + br label %EndIf + +Else: ; preds = %Entry + br label %EndIf + +EndIf: ; preds = %Else, %Then + ret void +} + +; Function Attrs: argmemonly nounwind willreturn +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) + +; Function Attrs: nounwind +declare void @llvm.debugtrap() Index: llvm/test/CodeGen/X86/pr15267.ll =================================================================== --- llvm/test/CodeGen/X86/pr15267.ll +++ llvm/test/CodeGen/X86/pr15267.ll @@ -7,14 +7,18 @@ ; CHECK-NEXT: movzwl (%rdi), %eax ; CHECK-NEXT: movl %eax, %ecx ; CHECK-NEXT: shrl $3, %ecx -; CHECK-NEXT: vmovd %eax, %xmm0 +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: movl %eax, %edx +; CHECK-NEXT: andl $7, %edx +; CHECK-NEXT: vmovd %edx, %xmm0 ; CHECK-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 ; CHECK-NEXT: movl %eax, %ecx ; CHECK-NEXT: shrl $6, %ecx +; CHECK-NEXT: andl $7, %ecx ; CHECK-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 ; CHECK-NEXT: shrl $9, %eax +; CHECK-NEXT: andl $7, %eax ; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 -; CHECK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; CHECK-NEXT: retq %ret 
= load <4 x i3>, <4 x i3>* %in, align 1 ret <4 x i3> %ret @@ -23,17 +27,24 @@ define <4 x i1> @test2(<4 x i1>* %in) nounwind { ; CHECK-LABEL: test2: ; CHECK: # %bb.0: -; CHECK-NEXT: movzbl (%rdi), %eax +; CHECK-NEXT: movb (%rdi), %al ; CHECK-NEXT: movl %eax, %ecx -; CHECK-NEXT: shrl %ecx -; CHECK-NEXT: vmovd %eax, %xmm0 -; CHECK-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; CHECK-NEXT: shrb %cl +; CHECK-NEXT: andb $1, %cl +; CHECK-NEXT: movzbl %cl, %ecx +; CHECK-NEXT: movl %eax, %edx +; CHECK-NEXT: andb $1, %dl +; CHECK-NEXT: movzbl %dl, %edx +; CHECK-NEXT: vmovd %edx, %xmm0 +; CHECK-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 ; CHECK-NEXT: movl %eax, %ecx -; CHECK-NEXT: shrl $2, %ecx -; CHECK-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; CHECK-NEXT: shrl $3, %eax -; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 -; CHECK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-NEXT: shrb $2, %cl +; CHECK-NEXT: andb $1, %cl +; CHECK-NEXT: movzbl %cl, %ecx +; CHECK-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; CHECK-NEXT: shrb $3, %al +; CHECK-NEXT: movzbl %al, %eax +; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ; CHECK-NEXT: retq %ret = load <4 x i1>, <4 x i1>* %in, align 1 ret <4 x i1> %ret @@ -42,21 +53,26 @@ define <4 x i64> @test3(<4 x i1>* %in) nounwind { ; CHECK-LABEL: test3: ; CHECK: # %bb.0: -; CHECK-NEXT: movzbl (%rdi), %eax -; CHECK-NEXT: movq %rax, %rcx -; CHECK-NEXT: shlq $62, %rcx -; CHECK-NEXT: sarq $63, %rcx -; CHECK-NEXT: movq %rax, %rdx -; CHECK-NEXT: shlq $63, %rdx -; CHECK-NEXT: sarq $63, %rdx +; CHECK-NEXT: movb (%rdi), %al +; CHECK-NEXT: movzbl %al, %ecx +; CHECK-NEXT: shrb %al +; CHECK-NEXT: movzbl %al, %eax +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: negl %eax +; CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: andl $1, %edx +; CHECK-NEXT: negl %edx ; CHECK-NEXT: vmovd %edx, %xmm0 -; CHECK-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; CHECK-NEXT: movq %rax, %rcx -; CHECK-NEXT: shlq $61, %rcx -; CHECK-NEXT: sarq $63, %rcx -; CHECK-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; CHECK-NEXT: shlq $60, %rax -; CHECK-NEXT: sarq $63, %rax +; CHECK-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 +; CHECK-NEXT: movl %ecx, %eax +; CHECK-NEXT: shrb $2, %al +; CHECK-NEXT: movzbl %al, %eax +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: negl %eax +; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 +; CHECK-NEXT: shrb $3, %cl +; CHECK-NEXT: movzbl %cl, %eax +; CHECK-NEXT: negl %eax ; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 ; CHECK-NEXT: vpmovsxdq %xmm0, %xmm1 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] Index: llvm/test/CodeGen/X86/vector-sext.ll =================================================================== --- llvm/test/CodeGen/X86/vector-sext.ll +++ llvm/test/CodeGen/X86/vector-sext.ll @@ -1342,41 +1342,44 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) { ; SSE-LABEL: load_sext_2i1_to_2i64: ; SSE: # %bb.0: # %entry -; SSE-NEXT: movzbl (%rdi), %eax -; SSE-NEXT: movq %rax, %rcx -; SSE-NEXT: shlq $62, %rcx -; SSE-NEXT: movq %rcx, %xmm0 -; SSE-NEXT: shlq $63, %rax +; SSE-NEXT: movb (%rdi), %al +; SSE-NEXT: movzbl %al, %ecx +; SSE-NEXT: shrb %al +; SSE-NEXT: movzbl %al, %eax +; SSE-NEXT: negq %rax ; SSE-NEXT: movq %rax, %xmm1 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE-NEXT: psrad $31, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: negq %rcx +; SSE-NEXT: movq %rcx, %xmm0 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: retq ; ; AVX1-LABEL: load_sext_2i1_to_2i64: ; AVX1: # %bb.0: # %entry -; AVX1-NEXT: movzbl (%rdi), %eax -; AVX1-NEXT: movq %rax, 
%rcx -; AVX1-NEXT: shlq $62, %rcx -; AVX1-NEXT: vmovq %rcx, %xmm0 -; AVX1-NEXT: shlq $63, %rax -; AVX1-NEXT: vmovq %rax, %xmm1 +; AVX1-NEXT: movb (%rdi), %al +; AVX1-NEXT: movzbl %al, %ecx +; AVX1-NEXT: shrb %al +; AVX1-NEXT: movzbl %al, %eax +; AVX1-NEXT: negq %rax +; AVX1-NEXT: vmovq %rax, %xmm0 +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: negq %rcx +; AVX1-NEXT: vmovq %rcx, %xmm1 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: load_sext_2i1_to_2i64: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: movzbl (%rdi), %eax -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $62, %rcx -; AVX2-NEXT: vmovq %rcx, %xmm0 -; AVX2-NEXT: shlq $63, %rax -; AVX2-NEXT: vmovq %rax, %xmm1 +; AVX2-NEXT: movb (%rdi), %al +; AVX2-NEXT: movzbl %al, %ecx +; AVX2-NEXT: shrb %al +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: negq %rax +; AVX2-NEXT: vmovq %rax, %xmm0 +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: negq %rcx +; AVX2-NEXT: vmovq %rcx, %xmm1 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: load_sext_2i1_to_2i64: @@ -1390,30 +1393,34 @@ ; X32-SSE2-LABEL: load_sext_2i1_to_2i64: ; X32-SSE2: # %bb.0: # %entry ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-SSE2-NEXT: movzbl (%eax), %eax -; X32-SSE2-NEXT: movl %eax, %ecx -; X32-SSE2-NEXT: shll $30, %ecx -; X32-SSE2-NEXT: movd %ecx, %xmm0 -; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] -; X32-SSE2-NEXT: shll $31, %eax +; X32-SSE2-NEXT: movb (%eax), %al +; X32-SSE2-NEXT: movzbl %al, %ecx +; X32-SSE2-NEXT: shrb %al +; X32-SSE2-NEXT: movzbl %al, %eax +; X32-SSE2-NEXT: negl %eax ; X32-SSE2-NEXT: movd %eax, %xmm0 +; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] +; X32-SSE2-NEXT: andl $1, %ecx +; X32-SSE2-NEXT: negl %ecx +; X32-SSE2-NEXT: movd %ecx, %xmm0 ; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] ; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; X32-SSE2-NEXT: psrad $31, %xmm0 ; X32-SSE2-NEXT: retl ; ; X32-SSE41-LABEL: load_sext_2i1_to_2i64: ; X32-SSE41: # %bb.0: # %entry ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-SSE41-NEXT: movzbl (%eax), %eax -; X32-SSE41-NEXT: movl %eax, %ecx -; X32-SSE41-NEXT: shll $31, %ecx +; X32-SSE41-NEXT: movb (%eax), %al +; X32-SSE41-NEXT: movzbl %al, %ecx +; X32-SSE41-NEXT: andl $1, %ecx +; X32-SSE41-NEXT: negl %ecx ; X32-SSE41-NEXT: movd %ecx, %xmm0 ; X32-SSE41-NEXT: pinsrd $1, %ecx, %xmm0 -; X32-SSE41-NEXT: shll $30, %eax +; X32-SSE41-NEXT: shrb %al +; X32-SSE41-NEXT: movzbl %al, %eax +; X32-SSE41-NEXT: negl %eax ; X32-SSE41-NEXT: pinsrd $2, %eax, %xmm0 ; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0 -; X32-SSE41-NEXT: psrad $31, %xmm0 ; X32-SSE41-NEXT: retl entry: %X = load <2 x i1>, <2 x i1>* %ptr @@ -1483,107 +1490,132 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) { ; SSE2-LABEL: load_sext_4i1_to_4i32: ; SSE2: # %bb.0: # %entry -; SSE2-NEXT: movl (%rdi), %eax -; SSE2-NEXT: movq %rax, %rcx -; SSE2-NEXT: shlq $60, %rcx -; SSE2-NEXT: sarq $63, %rcx +; SSE2-NEXT: movb (%rdi), %al +; SSE2-NEXT: movl %eax, %ecx +; SSE2-NEXT: shrb $3, %cl +; SSE2-NEXT: movzbl %cl, %ecx +; SSE2-NEXT: negl %ecx ; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: movq %rax, %rcx -; SSE2-NEXT: shlq $61, %rcx -; SSE2-NEXT: sarq $63, %rcx -; SSE2-NEXT: movd %ecx, %xmm1 +; SSE2-NEXT: movzbl %al, %ecx +; SSE2-NEXT: shrb $2, %al +; SSE2-NEXT: movzbl %al, %eax +; SSE2-NEXT: andl $1, %eax 
+; SSE2-NEXT: negl %eax +; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-NEXT: movq %rax, %rcx -; SSE2-NEXT: shlq $62, %rcx -; SSE2-NEXT: sarq $63, %rcx -; SSE2-NEXT: movd %ecx, %xmm2 -; SSE2-NEXT: shlq $63, %rax -; SSE2-NEXT: sarq $63, %rax +; SSE2-NEXT: movl %ecx, %eax +; SSE2-NEXT: andl $1, %eax +; SSE2-NEXT: negl %eax ; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: shrb %cl +; SSE2-NEXT: movzbl %cl, %eax +; SSE2-NEXT: andl $1, %eax +; SSE2-NEXT: negl %eax +; SSE2-NEXT: movd %eax, %xmm2 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: load_sext_4i1_to_4i32: ; SSSE3: # %bb.0: # %entry -; SSSE3-NEXT: movl (%rdi), %eax -; SSSE3-NEXT: movq %rax, %rcx -; SSSE3-NEXT: shlq $60, %rcx -; SSSE3-NEXT: sarq $63, %rcx +; SSSE3-NEXT: movb (%rdi), %al +; SSSE3-NEXT: movl %eax, %ecx +; SSSE3-NEXT: shrb $3, %cl +; SSSE3-NEXT: movzbl %cl, %ecx +; SSSE3-NEXT: negl %ecx ; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: movq %rax, %rcx -; SSSE3-NEXT: shlq $61, %rcx -; SSSE3-NEXT: sarq $63, %rcx -; SSSE3-NEXT: movd %ecx, %xmm1 +; SSSE3-NEXT: movzbl %al, %ecx +; SSSE3-NEXT: shrb $2, %al +; SSSE3-NEXT: movzbl %al, %eax +; SSSE3-NEXT: andl $1, %eax +; SSSE3-NEXT: negl %eax +; SSSE3-NEXT: movd %eax, %xmm1 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSSE3-NEXT: movq %rax, %rcx -; SSSE3-NEXT: shlq $62, %rcx -; SSSE3-NEXT: sarq $63, %rcx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: shlq $63, %rax -; SSSE3-NEXT: sarq $63, %rax +; SSSE3-NEXT: movl %ecx, %eax +; SSSE3-NEXT: andl $1, %eax +; SSSE3-NEXT: negl %eax ; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: shrb %cl +; SSSE3-NEXT: movzbl %cl, %eax +; SSSE3-NEXT: andl $1, %eax +; SSSE3-NEXT: negl %eax +; SSSE3-NEXT: movd %eax, %xmm2 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: load_sext_4i1_to_4i32: ; SSE41: # %bb.0: # %entry -; SSE41-NEXT: movl (%rdi), %eax -; SSE41-NEXT: movq %rax, %rcx -; SSE41-NEXT: shlq $62, %rcx -; SSE41-NEXT: sarq $63, %rcx -; SSE41-NEXT: movq %rax, %rdx -; SSE41-NEXT: shlq $63, %rdx -; SSE41-NEXT: sarq $63, %rdx +; SSE41-NEXT: movb (%rdi), %al +; SSE41-NEXT: movzbl %al, %ecx +; SSE41-NEXT: shrb %al +; SSE41-NEXT: movzbl %al, %eax +; SSE41-NEXT: andl $1, %eax +; SSE41-NEXT: negl %eax +; SSE41-NEXT: movl %ecx, %edx +; SSE41-NEXT: andl $1, %edx +; SSE41-NEXT: negl %edx ; SSE41-NEXT: movd %edx, %xmm0 -; SSE41-NEXT: pinsrd $1, %ecx, %xmm0 -; SSE41-NEXT: movq %rax, %rcx -; SSE41-NEXT: shlq $61, %rcx -; SSE41-NEXT: sarq $63, %rcx -; SSE41-NEXT: pinsrd $2, %ecx, %xmm0 -; SSE41-NEXT: shlq $60, %rax -; SSE41-NEXT: sarq $63, %rax +; SSE41-NEXT: pinsrd $1, %eax, %xmm0 +; SSE41-NEXT: movl %ecx, %eax +; SSE41-NEXT: shrb $2, %al +; SSE41-NEXT: movzbl %al, %eax +; SSE41-NEXT: andl $1, %eax +; SSE41-NEXT: negl %eax +; SSE41-NEXT: pinsrd $2, %eax, %xmm0 +; SSE41-NEXT: shrb $3, %cl +; SSE41-NEXT: movzbl %cl, %eax +; SSE41-NEXT: negl %eax ; SSE41-NEXT: pinsrd $3, %eax, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: load_sext_4i1_to_4i32: ; AVX1: # %bb.0: # %entry -; AVX1-NEXT: movl (%rdi), %eax -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $62, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: movq %rax, %rdx -; AVX1-NEXT: shlq $63, %rdx -; AVX1-NEXT: sarq $63, %rdx +; AVX1-NEXT: movb (%rdi), %al +; AVX1-NEXT: movzbl %al, %ecx +; AVX1-NEXT: shrb %al +; 
AVX1-NEXT: movzbl %al, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: negl %eax +; AVX1-NEXT: movl %ecx, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: negl %edx ; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shlq $61, %rcx -; AVX1-NEXT: sarq $63, %rcx -; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX1-NEXT: shlq $60, %rax -; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %ecx, %eax +; AVX1-NEXT: shrb $2, %al +; AVX1-NEXT: movzbl %al, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: negl %eax +; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 +; AVX1-NEXT: shrb $3, %cl +; AVX1-NEXT: movzbl %cl, %eax +; AVX1-NEXT: negl %eax ; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: load_sext_4i1_to_4i32: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: movl (%rdi), %eax -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $62, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: movq %rax, %rdx -; AVX2-NEXT: shlq $63, %rdx -; AVX2-NEXT: sarq $63, %rdx +; AVX2-NEXT: movb (%rdi), %al +; AVX2-NEXT: movzbl %al, %ecx +; AVX2-NEXT: shrb %al +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: negl %eax +; AVX2-NEXT: movl %ecx, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: negl %edx ; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shlq $61, %rcx -; AVX2-NEXT: sarq $63, %rcx -; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 -; AVX2-NEXT: shlq $60, %rax -; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: shrb $2, %al +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: negl %eax +; AVX2-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 +; AVX2-NEXT: shrb $3, %cl +; AVX2-NEXT: movzbl %cl, %eax +; AVX2-NEXT: negl %eax ; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 ; AVX2-NEXT: retq ; @@ -1598,40 +1630,56 @@ ; X32-SSE2-LABEL: load_sext_4i1_to_4i32: ; X32-SSE2: # %bb.0: # %entry ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-SSE2-NEXT: movl (%eax), %eax +; X32-SSE2-NEXT: movb (%eax), %al ; X32-SSE2-NEXT: movl %eax, %ecx -; X32-SSE2-NEXT: shll $28, %ecx +; X32-SSE2-NEXT: shrb $3, %cl +; X32-SSE2-NEXT: movzbl %cl, %ecx +; X32-SSE2-NEXT: negl %ecx ; X32-SSE2-NEXT: movd %ecx, %xmm0 ; X32-SSE2-NEXT: movl %eax, %ecx -; X32-SSE2-NEXT: shll $29, %ecx +; X32-SSE2-NEXT: shrb $2, %cl +; X32-SSE2-NEXT: movzbl %cl, %ecx +; X32-SSE2-NEXT: andl $1, %ecx +; X32-SSE2-NEXT: negl %ecx ; X32-SSE2-NEXT: movd %ecx, %xmm1 ; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; X32-SSE2-NEXT: movl %eax, %ecx -; X32-SSE2-NEXT: shll $30, %ecx -; X32-SSE2-NEXT: movd %ecx, %xmm2 -; X32-SSE2-NEXT: shll $31, %eax -; X32-SSE2-NEXT: movd %eax, %xmm0 +; X32-SSE2-NEXT: movzbl %al, %ecx +; X32-SSE2-NEXT: andl $1, %ecx +; X32-SSE2-NEXT: negl %ecx +; X32-SSE2-NEXT: movd %ecx, %xmm0 +; X32-SSE2-NEXT: shrb %al +; X32-SSE2-NEXT: movzbl %al, %eax +; X32-SSE2-NEXT: andl $1, %eax +; X32-SSE2-NEXT: negl %eax +; X32-SSE2-NEXT: movd %eax, %xmm2 ; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; X32-SSE2-NEXT: psrad $31, %xmm0 ; X32-SSE2-NEXT: retl ; ; X32-SSE41-LABEL: load_sext_4i1_to_4i32: ; X32-SSE41: # %bb.0: # %entry ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-SSE41-NEXT: movl (%eax), %eax +; X32-SSE41-NEXT: movb (%eax), %al ; X32-SSE41-NEXT: movl %eax, %ecx -; 
X32-SSE41-NEXT: shll $30, %ecx -; X32-SSE41-NEXT: movl %eax, %edx -; X32-SSE41-NEXT: shll $31, %edx +; X32-SSE41-NEXT: shrb %cl +; X32-SSE41-NEXT: movzbl %cl, %ecx +; X32-SSE41-NEXT: andl $1, %ecx +; X32-SSE41-NEXT: negl %ecx +; X32-SSE41-NEXT: movzbl %al, %edx +; X32-SSE41-NEXT: andl $1, %edx +; X32-SSE41-NEXT: negl %edx ; X32-SSE41-NEXT: movd %edx, %xmm0 ; X32-SSE41-NEXT: pinsrd $1, %ecx, %xmm0 ; X32-SSE41-NEXT: movl %eax, %ecx -; X32-SSE41-NEXT: shll $29, %ecx +; X32-SSE41-NEXT: shrb $2, %cl +; X32-SSE41-NEXT: movzbl %cl, %ecx +; X32-SSE41-NEXT: andl $1, %ecx +; X32-SSE41-NEXT: negl %ecx ; X32-SSE41-NEXT: pinsrd $2, %ecx, %xmm0 -; X32-SSE41-NEXT: shll $28, %eax +; X32-SSE41-NEXT: shrb $3, %al +; X32-SSE41-NEXT: movzbl %al, %eax +; X32-SSE41-NEXT: negl %eax ; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0 -; X32-SSE41-NEXT: psrad $31, %xmm0 ; X32-SSE41-NEXT: retl entry: %X = load <4 x i1>, <4 x i1>* %ptr @@ -1689,25 +1737,29 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) { ; SSE2-LABEL: load_sext_4i1_to_4i64: ; SSE2: # %bb.0: # %entry -; SSE2-NEXT: movl (%rdi), %eax +; SSE2-NEXT: movb (%rdi), %al ; SSE2-NEXT: movl %eax, %ecx -; SSE2-NEXT: shrl $3, %ecx -; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: shrb %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movzbl %cl, %ecx +; SSE2-NEXT: movl %eax, %edx +; SSE2-NEXT: andb $1, %dl +; SSE2-NEXT: movzbl %dl, %edx +; SSE2-NEXT: movd %edx, %xmm1 +; SSE2-NEXT: pinsrw $2, %ecx, %xmm1 ; SSE2-NEXT: movl %eax, %ecx -; SSE2-NEXT: shrl $2, %ecx -; SSE2-NEXT: movd %ecx, %xmm1 -; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: shrl %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3] +; SSE2-NEXT: shrb $2, %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movzbl %cl, %ecx +; SSE2-NEXT: pinsrw $4, %ecx, %xmm1 +; SSE2-NEXT: shrb $3, %al +; SSE2-NEXT: movzbl %al, %eax +; SSE2-NEXT: pinsrw $6, %eax, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3] ; SSE2-NEXT: psllq $63, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3] ; SSE2-NEXT: psllq $63, %xmm1 ; SSE2-NEXT: psrad $31, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] @@ -1715,25 +1767,29 @@ ; ; SSSE3-LABEL: load_sext_4i1_to_4i64: ; SSSE3: # %bb.0: # %entry -; SSSE3-NEXT: movl (%rdi), %eax +; SSSE3-NEXT: movb (%rdi), %al ; SSSE3-NEXT: movl %eax, %ecx -; SSSE3-NEXT: shrl $3, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: shrb %cl +; SSSE3-NEXT: andb $1, %cl +; SSSE3-NEXT: movzbl %cl, %ecx +; SSSE3-NEXT: movl %eax, %edx +; SSSE3-NEXT: andb $1, %dl +; SSSE3-NEXT: movzbl %dl, %edx +; SSSE3-NEXT: movd %edx, %xmm1 +; SSSE3-NEXT: pinsrw $2, %ecx, %xmm1 ; SSSE3-NEXT: movl %eax, %ecx -; SSSE3-NEXT: shrl $2, %ecx -; SSSE3-NEXT: movd %ecx, %xmm1 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: shrl %eax -; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; SSSE3-NEXT: pand {{.*}}(%rip), %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3] +; SSSE3-NEXT: shrb $2, %cl +; SSSE3-NEXT: andb $1, %cl +; SSSE3-NEXT: movzbl %cl, 
%ecx
+; SSSE3-NEXT: pinsrw $4, %ecx, %xmm1
+; SSSE3-NEXT: shrb $3, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: pinsrw $6, %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; SSSE3-NEXT: psllq $63, %xmm0
 ; SSSE3-NEXT: psrad $31, %xmm0
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
 ; SSSE3-NEXT: psllq $63, %xmm1
 ; SSSE3-NEXT: psrad $31, %xmm1
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -1741,18 +1797,25 @@
 ;
 ; SSE41-LABEL: load_sext_4i1_to_4i64:
 ; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movl (%rdi), %eax
+; SSE41-NEXT: movb (%rdi), %al
 ; SSE41-NEXT: movl %eax, %ecx
-; SSE41-NEXT: shrl %ecx
-; SSE41-NEXT: movd %eax, %xmm1
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $1, %cl
+; SSE41-NEXT: movzbl %cl, %ecx
+; SSE41-NEXT: movl %eax, %edx
+; SSE41-NEXT: andb $1, %dl
+; SSE41-NEXT: movzbl %dl, %edx
+; SSE41-NEXT: movd %edx, %xmm1
+; SSE41-NEXT: pinsrb $4, %ecx, %xmm1
 ; SSE41-NEXT: movl %eax, %ecx
-; SSE41-NEXT: shrl $2, %ecx
+; SSE41-NEXT: shrb $2, %cl
+; SSE41-NEXT: andb $1, %cl
+; SSE41-NEXT: movzbl %cl, %ecx
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pinsrd $2, %ecx, %xmm1
-; SSE41-NEXT: shrl $3, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT: pinsrb $8, %ecx, %xmm1
+; SSE41-NEXT: shrb $3, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
 ; SSE41-NEXT: psllq $63, %xmm0
 ; SSE41-NEXT: psrad $31, %xmm0
 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1764,21 +1827,26 @@
 ;
 ; AVX1-LABEL: load_sext_4i1_to_4i64:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: movl (%rdi), %eax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $62, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rax, %rdx
-; AVX1-NEXT: shlq $63, %rdx
-; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: movb (%rdi), %al
+; AVX1-NEXT: movzbl %al, %ecx
+; AVX1-NEXT: shrb %al
+; AVX1-NEXT: movzbl %al, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: negl %edx
 ; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shlq $60, %rax
-; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: movzbl %al, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: shrb $3, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: negl %eax
 ; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1788,23 +1856,30 @@
 ;
 ; AVX2-LABEL: load_sext_4i1_to_4i64:
 ; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: movl (%rdi), %eax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $60, %rcx
+; AVX2-NEXT: movb (%rdi), %al
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrb $3, %cl
+; AVX2-NEXT: movzbl %cl, %ecx
+; AVX2-NEXT: negq %rcx
 ; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: movzbl %al, %ecx
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: movzbl %al, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: negq %rax
+; AVX2-NEXT: vmovq %rax, %xmm1
 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $62, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: shlq $63, %rax
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: negq %rax
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: negq %rax
 ; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: load_sext_4i1_to_4i64:
@@ -1817,25 +1892,29 @@
 ; X32-SSE2-LABEL: load_sext_4i1_to_4i64:
 ; X32-SSE2: # %bb.0: # %entry
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movzbl (%eax), %eax
+; X32-SSE2-NEXT: movb (%eax), %al
 ; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shrl $3, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
+; X32-SSE2-NEXT: shrb %cl
+; X32-SSE2-NEXT: andb $1, %cl
+; X32-SSE2-NEXT: movzbl %cl, %ecx
+; X32-SSE2-NEXT: movl %eax, %edx
+; X32-SSE2-NEXT: andb $1, %dl
+; X32-SSE2-NEXT: movzbl %dl, %edx
+; X32-SSE2-NEXT: movd %edx, %xmm1
+; X32-SSE2-NEXT: pinsrw $2, %ecx, %xmm1
 ; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shrl $2, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: movd %eax, %xmm2
-; X32-SSE2-NEXT: shrl %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; X32-SSE2-NEXT: shrb $2, %cl
+; X32-SSE2-NEXT: andb $1, %cl
+; X32-SSE2-NEXT: movzbl %cl, %ecx
+; X32-SSE2-NEXT: pinsrw $4, %ecx, %xmm1
+; X32-SSE2-NEXT: shrb $3, %al
+; X32-SSE2-NEXT: movzbl %al, %eax
+; X32-SSE2-NEXT: pinsrw $6, %eax, %xmm1
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; X32-SSE2-NEXT: psllq $63, %xmm0
 ; X32-SSE2-NEXT: psrad $31, %xmm0
 ; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
 ; X32-SSE2-NEXT: psllq $63, %xmm1
 ; X32-SSE2-NEXT: psrad $31, %xmm1
 ; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -1844,18 +1923,25 @@
 ; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
 ; X32-SSE41: # %bb.0: # %entry
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movzbl (%eax), %eax
+; X32-SSE41-NEXT: movb (%eax), %al
 ; X32-SSE41-NEXT: movl %eax, %ecx
-; X32-SSE41-NEXT: shrl %ecx
-; X32-SSE41-NEXT: movd %eax, %xmm1
-; X32-SSE41-NEXT: pinsrd $1, %ecx, %xmm1
+; X32-SSE41-NEXT: shrb %cl
+; X32-SSE41-NEXT: andb $1, %cl
+; X32-SSE41-NEXT: movzbl %cl, %ecx
+; X32-SSE41-NEXT: movl %eax, %edx
+; X32-SSE41-NEXT: andb $1, %dl
+; X32-SSE41-NEXT: movzbl %dl, %edx
+; X32-SSE41-NEXT: movd %edx, %xmm1
+; X32-SSE41-NEXT: pinsrb $4, %ecx, %xmm1
 ; X32-SSE41-NEXT: movl %eax, %ecx
-; X32-SSE41-NEXT: shrl $2, %ecx
+; X32-SSE41-NEXT: shrb $2, %cl
+; X32-SSE41-NEXT: andb $1, %cl
+; X32-SSE41-NEXT: movzbl %cl, %ecx
 ; X32-SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; X32-SSE41-NEXT: pinsrd $2, %ecx, %xmm1
-; X32-SSE41-NEXT: shrl $3, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; X32-SSE41-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE41-NEXT: pinsrb $8, %ecx, %xmm1
+; X32-SSE41-NEXT: shrb $3, %al
+; X32-SSE41-NEXT: movzbl %al, %eax
+; X32-SSE41-NEXT: pinsrb $12, %eax, %xmm1
 ; X32-SSE41-NEXT: psllq $63, %xmm0
 ; X32-SSE41-NEXT: psrad $31, %xmm0
 ; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -3376,25 +3462,27 @@
 ; SSE2-LABEL: sext_4i17_to_4i32:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shll $15, %ecx
+; SSE2-NEXT: sarl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
 ; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shlq $30, %rcx
-; SSE2-NEXT: sarq $47, %rcx
+; SSE2-NEXT: shrq $17, %rcx
+; SSE2-NEXT: shll $15, %ecx
+; SSE2-NEXT: sarl $15, %ecx
 ; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shlq $47, %rcx
-; SSE2-NEXT: sarq $47, %rcx
-; SSE2-NEXT: movd %ecx, %xmm0
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT: movl 8(%rdi), %ecx
 ; SSE2-NEXT: shll $13, %ecx
 ; SSE2-NEXT: movq %rax, %rdx
 ; SSE2-NEXT: shrq $51, %rdx
 ; SSE2-NEXT: orl %ecx, %edx
-; SSE2-NEXT: shlq $47, %rdx
-; SSE2-NEXT: sarq $47, %rdx
+; SSE2-NEXT: shll $15, %edx
+; SSE2-NEXT: sarl $15, %edx
 ; SSE2-NEXT: movd %edx, %xmm1
-; SSE2-NEXT: shlq $13, %rax
-; SSE2-NEXT: sarq $47, %rax
+; SSE2-NEXT: shrq $34, %rax
+; SSE2-NEXT: shll $15, %eax
+; SSE2-NEXT: sarl $15, %eax
 ; SSE2-NEXT: movd %eax, %xmm2
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -3403,25 +3491,27 @@
 ; SSSE3-LABEL: sext_4i17_to_4i32:
 ; SSSE3: # %bb.0:
 ; SSSE3-NEXT: movq (%rdi), %rax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shll $15, %ecx
+; SSSE3-NEXT: sarl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
 ; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shlq $30, %rcx
-; SSSE3-NEXT: sarq $47, %rcx
+; SSSE3-NEXT: shrq $17, %rcx
+; SSSE3-NEXT: shll $15, %ecx
+; SSSE3-NEXT: sarl $15, %ecx
 ; SSSE3-NEXT: movd %ecx, %xmm1
-; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shlq $47, %rcx
-; SSSE3-NEXT: sarq $47, %rcx
-; SSSE3-NEXT: movd %ecx, %xmm0
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT: movl 8(%rdi), %ecx
 ; SSSE3-NEXT: shll $13, %ecx
 ; SSSE3-NEXT: movq %rax, %rdx
 ; SSSE3-NEXT: shrq $51, %rdx
 ; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: shlq $47, %rdx
-; SSSE3-NEXT: sarq $47, %rdx
+; SSSE3-NEXT: shll $15, %edx
+; SSSE3-NEXT: sarl $15, %edx
 ; SSSE3-NEXT: movd %edx, %xmm1
-; SSSE3-NEXT: shlq $13, %rax
-; SSSE3-NEXT: sarq $47, %rax
+; SSSE3-NEXT: shrq $34, %rax
+; SSSE3-NEXT: shll $15, %eax
+; SSSE3-NEXT: sarl $15, %eax
 ; SSSE3-NEXT: movd %eax, %xmm2
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -3431,23 +3521,25 @@
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: movq (%rdi), %rax
 ; SSE41-NEXT: movq %rax, %rcx
-; SSE41-NEXT: shlq $30, %rcx
-; SSE41-NEXT: sarq $47, %rcx
-; SSE41-NEXT: movq %rax, %rdx
-; SSE41-NEXT: shlq $47, %rdx
-; SSE41-NEXT: sarq $47, %rdx
+; SSE41-NEXT: shrq $17, %rcx
+; SSE41-NEXT: shll $15, %ecx
+; SSE41-NEXT: sarl $15, %ecx
+; SSE41-NEXT: movl %eax, %edx
+; SSE41-NEXT: shll $15, %edx
+; SSE41-NEXT: sarl $15, %edx
 ; SSE41-NEXT: movd %edx, %xmm0
 ; SSE41-NEXT: pinsrd $1, %ecx, %xmm0
 ; SSE41-NEXT: movq %rax, %rcx
-; SSE41-NEXT: shlq $13, %rcx
-; SSE41-NEXT: sarq $47, %rcx
+; SSE41-NEXT: shrq $34, %rcx
+; SSE41-NEXT: shll $15, %ecx
+; SSE41-NEXT: sarl $15, %ecx
 ; SSE41-NEXT: pinsrd $2, %ecx, %xmm0
 ; SSE41-NEXT: movl 8(%rdi), %ecx
 ; SSE41-NEXT: shll $13, %ecx
 ; SSE41-NEXT: shrq $51, %rax
 ; SSE41-NEXT: orl %ecx, %eax
-; SSE41-NEXT: shlq $47, %rax
-; SSE41-NEXT: sarq $47, %rax
+; SSE41-NEXT: shll $15, %eax
+; SSE41-NEXT: sarl $15, %eax
 ; SSE41-NEXT: pinsrd $3, %eax, %xmm0
 ; SSE41-NEXT: retq
 ;
@@ -3455,23 +3547,25 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: movq (%rdi), %rax
 ; AVX-NEXT: movq %rax, %rcx
-; AVX-NEXT: shlq $30, %rcx
-; AVX-NEXT: sarq $47, %rcx
-; AVX-NEXT: movq %rax, %rdx
-; AVX-NEXT: shlq $47, %rdx
-; AVX-NEXT: sarq $47, %rdx
+; AVX-NEXT: shrq $17, %rcx
+; AVX-NEXT: shll $15, %ecx
+; AVX-NEXT: sarl $15, %ecx
+; AVX-NEXT: movl %eax, %edx
+; AVX-NEXT: shll $15, %edx
+; AVX-NEXT: sarl $15, %edx
 ; AVX-NEXT: vmovd %edx, %xmm0
 ; AVX-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
 ; AVX-NEXT: movq %rax, %rcx
-; AVX-NEXT: shlq $13, %rcx
-; AVX-NEXT: sarq $47, %rcx
+; AVX-NEXT: shrq $34, %rcx
+; AVX-NEXT: shll $15, %ecx
+; AVX-NEXT: sarl $15, %ecx
 ; AVX-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
 ; AVX-NEXT: movl 8(%rdi), %ecx
 ; AVX-NEXT: shll $13, %ecx
 ; AVX-NEXT: shrq $51, %rax
 ; AVX-NEXT: orl %ecx, %eax
-; AVX-NEXT: shlq $47, %rax
-; AVX-NEXT: sarq $47, %rax
+; AVX-NEXT: shll $15, %eax
+; AVX-NEXT: sarl $15, %eax
 ; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
@@ -3483,19 +3577,22 @@
 ; X32-SSE2-NEXT: movl 8(%eax), %eax
 ; X32-SSE2-NEXT: shldl $13, %edx, %eax
 ; X32-SSE2-NEXT: shll $15, %eax
+; X32-SSE2-NEXT: sarl $15, %eax
 ; X32-SSE2-NEXT: movd %eax, %xmm0
 ; X32-SSE2-NEXT: movl %edx, %eax
 ; X32-SSE2-NEXT: shll $13, %eax
+; X32-SSE2-NEXT: sarl $15, %eax
 ; X32-SSE2-NEXT: movd %eax, %xmm1
 ; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; X32-SSE2-NEXT: shldl $15, %ecx, %edx
 ; X32-SSE2-NEXT: shll $15, %ecx
+; X32-SSE2-NEXT: sarl $15, %ecx
 ; X32-SSE2-NEXT: movd %ecx, %xmm0
 ; X32-SSE2-NEXT: shll $15, %edx
+; X32-SSE2-NEXT: sarl $15, %edx
 ; X32-SSE2-NEXT: movd %edx, %xmm2
 ; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT: psrad $15, %xmm0
 ; X32-SSE2-NEXT: retl
 ;
 ; X32-SSE41-LABEL: sext_4i17_to_4i32:
@@ -3511,14 +3608,17 @@
 ; X32-SSE41-NEXT: shldl $13, %edx, %eax
 ; X32-SSE41-NEXT: shldl $15, %ecx, %edx
 ; X32-SSE41-NEXT: shll $15, %edx
+; X32-SSE41-NEXT: sarl $15, %edx
 ; X32-SSE41-NEXT: shll $15, %ecx
+; X32-SSE41-NEXT: sarl $15, %ecx
 ; X32-SSE41-NEXT: movd %ecx, %xmm0
 ; X32-SSE41-NEXT: pinsrd $1, %edx, %xmm0
 ; X32-SSE41-NEXT: shll $13, %esi
+; X32-SSE41-NEXT: sarl $15, %esi
 ; X32-SSE41-NEXT: pinsrd $2, %esi, %xmm0
 ; X32-SSE41-NEXT: shll $15, %eax
+; X32-SSE41-NEXT: sarl $15, %eax
 ; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT: psrad $15, %xmm0
 ; X32-SSE41-NEXT: popl %esi
 ; X32-SSE41-NEXT: .cfi_def_cfa_offset 4
 ; X32-SSE41-NEXT: retl
Index: llvm/test/CodeGen/X86/vector-zext.ll
===================================================================
--- llvm/test/CodeGen/X86/vector-zext.ll
+++ llvm/test/CodeGen/X86/vector-zext.ll
@@ -2326,49 +2326,58 @@
 define <4 x i32> @zext_4i17_to_4i32(<4 x i17>* %ptr) {
 ; SSE2-LABEL: zext_4i17_to_4i32:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shrq $17, %rcx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movl 8(%rdi), %ecx
-; SSE2-NEXT: shll $13, %ecx
-; SSE2-NEXT: movq %rax, %rdx
+; SSE2-NEXT: movzbl 8(%rdi), %eax
+; SSE2-NEXT: andl $15, %eax
+; SSE2-NEXT: shll $13, %eax
+; SSE2-NEXT: movq (%rdi), %rcx
+; SSE2-NEXT: movq %rcx, %rdx
 ; SSE2-NEXT: shrq $51, %rdx
-; SSE2-NEXT: orl %ecx, %edx
-; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: orl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: movq %rcx, %rax
 ; SSE2-NEXT: shrq $34, %rax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: andl $131071, %eax # imm = 0x1FFFF
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: movl %ecx, %eax
+; SSE2-NEXT: andl $131071, %eax # imm = 0x1FFFF
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: shrq $17, %rcx
+; SSE2-NEXT: andl $131071, %ecx # imm = 0x1FFFF
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: zext_4i17_to_4i32:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movq (%rdi), %rax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shrq $17, %rcx
-; SSSE3-NEXT: movd %ecx, %xmm1
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: movl 8(%rdi), %ecx
-; SSSE3-NEXT: shll $13, %ecx
-; SSSE3-NEXT: movq %rax, %rdx
+; SSSE3-NEXT: movzbl 8(%rdi), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: shll $13, %eax
+; SSSE3-NEXT: movq (%rdi), %rcx
+; SSSE3-NEXT: movq %rcx, %rdx
 ; SSSE3-NEXT: shrq $51, %rdx
-; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: movd %edx, %xmm1
+; SSSE3-NEXT: orl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: movq %rcx, %rax
 ; SSSE3-NEXT: shrq $34, %rax
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: andl $131071, %eax # imm = 0x1FFFF
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: movl %ecx, %eax
+; SSSE3-NEXT: andl $131071, %eax # imm = 0x1FFFF
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: shrq $17, %rcx
+; SSSE3-NEXT: andl $131071, %ecx # imm = 0x1FFFF
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: zext_4i17_to_4i32:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movl 8(%rdi), %eax
+; SSE41-NEXT: movzbl 8(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
 ; SSE41-NEXT: shll $13, %eax
 ; SSE41-NEXT: movq (%rdi), %rcx
 ; SSE41-NEXT: movq %rcx, %rdx
@@ -2376,69 +2385,38 @@
 ; SSE41-NEXT: orl %eax, %edx
 ; SSE41-NEXT: movq %rcx, %rax
 ; SSE41-NEXT: shrq $17, %rax
-; SSE41-NEXT: movd %ecx, %xmm0
+; SSE41-NEXT: andl $131071, %eax # imm = 0x1FFFF
+; SSE41-NEXT: movl %ecx, %esi
+; SSE41-NEXT: andl $131071, %esi # imm = 0x1FFFF
+; SSE41-NEXT: movd %esi, %xmm0
 ; SSE41-NEXT: pinsrd $1, %eax, %xmm0
 ; SSE41-NEXT: shrq $34, %rcx
+; SSE41-NEXT: andl $131071, %ecx # imm = 0x1FFFF
 ; SSE41-NEXT: pinsrd $2, %ecx, %xmm0
 ; SSE41-NEXT: pinsrd $3, %edx, %xmm0
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX1-LABEL: zext_4i17_to_4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: movl 8(%rdi), %eax
-; AVX1-NEXT: shll $13, %eax
-; AVX1-NEXT: movq (%rdi), %rcx
-; AVX1-NEXT: movq %rcx, %rdx
-; AVX1-NEXT: shrq $51, %rdx
-; AVX1-NEXT: orl %eax, %edx
-; AVX1-NEXT: movq %rcx, %rax
-; AVX1-NEXT: shrq $17, %rax
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: shrq $34, %rcx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: zext_4i17_to_4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: movl 8(%rdi), %eax
-; AVX2-NEXT: shll $13, %eax
-; AVX2-NEXT: movq (%rdi), %rcx
-; AVX2-NEXT: movq %rcx, %rdx
-; AVX2-NEXT: shrq $51, %rdx
-; AVX2-NEXT: orl %eax, %edx
-; AVX2-NEXT: movq %rcx, %rax
-; AVX2-NEXT: shrq $17, %rax
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: shrq $34, %rcx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [131071,131071,131071,131071]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: zext_4i17_to_4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: movl 8(%rdi), %eax
-; AVX512-NEXT: shll $13, %eax
-; AVX512-NEXT: movq (%rdi), %rcx
-; AVX512-NEXT: movq %rcx, %rdx
-; AVX512-NEXT: shrq $51, %rdx
-; AVX512-NEXT: orl %eax, %edx
-; AVX512-NEXT: movq %rcx, %rax
-; AVX512-NEXT: shrq $17, %rax
-; AVX512-NEXT: vmovd %ecx, %xmm0
-; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX512-NEXT: shrq $34, %rcx
-; AVX512-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
-; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm1 = [131071,131071,131071,131071]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX-LABEL: zext_4i17_to_4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: movzbl 8(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: shll $13, %eax
+; AVX-NEXT: movq (%rdi), %rcx
+; AVX-NEXT: movq %rcx, %rdx
+; AVX-NEXT: shrq $51, %rdx
+; AVX-NEXT: orl %eax, %edx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: shrq $17, %rax
+; AVX-NEXT: andl $131071, %eax # imm = 0x1FFFF
+; AVX-NEXT: movl %ecx, %esi
+; AVX-NEXT: andl $131071, %esi # imm = 0x1FFFF
+; AVX-NEXT: vmovd %esi, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: shrq $34, %rcx
+; AVX-NEXT: andl $131071, %ecx # imm = 0x1FFFF
+; AVX-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
+; AVX-NEXT: retq
 %a = load <4 x i17>, <4 x i17>* %ptr
 %b = zext <4 x i17> %a to <4 x i32>
 ret <4 x i32> %b