Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -306,21 +306,24 @@
                          BitConvertToInteger(GetScalarizedVector(InOp)));
     break;
   case TargetLowering::TypeSplitVector: {
-    // For example, i32 = BITCAST v2i16 on alpha. Convert the split
-    // pieces of the input into integers and reassemble in the final type.
-    SDValue Lo, Hi;
-    GetSplitVector(N->getOperand(0), Lo, Hi);
-    Lo = BitConvertToInteger(Lo);
-    Hi = BitConvertToInteger(Hi);
-
-    if (DAG.getDataLayout().isBigEndian())
-      std::swap(Lo, Hi);
-
-    InOp = DAG.getNode(ISD::ANY_EXTEND, dl,
-                       EVT::getIntegerVT(*DAG.getContext(),
-                                         NOutVT.getSizeInBits()),
-                       JoinIntegers(Lo, Hi));
-    return DAG.getNode(ISD::BITCAST, dl, NOutVT, InOp);
+    if (!NOutVT.isVector()) {
+      // For example, i32 = BITCAST v2i16 on alpha. Convert the split
+      // pieces of the input into integers and reassemble in the final type.
+      SDValue Lo, Hi;
+      GetSplitVector(N->getOperand(0), Lo, Hi);
+      Lo = BitConvertToInteger(Lo);
+      Hi = BitConvertToInteger(Hi);
+
+      if (DAG.getDataLayout().isBigEndian())
+        std::swap(Lo, Hi);
+
+      InOp = DAG.getNode(ISD::ANY_EXTEND, dl,
+                         EVT::getIntegerVT(*DAG.getContext(),
+                                           NOutVT.getSizeInBits()),
+                         JoinIntegers(Lo, Hi));
+      return DAG.getNode(ISD::BITCAST, dl, NOutVT, InOp);
+    }
+    break;
   }
   case TargetLowering::TypeWidenVector:
     // The input is widened to the same size. Convert to the widened value.
Index: llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll
+++ llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -750,11 +750,782 @@
 define i32 @bitcast_v64i8_to_v2i32(<64 x i8> %a0) nounwind {
 ; SSE2-SSSE3-LABEL: bitcast_v64i8_to_v2i32:
 ; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm4
+; SSE2-SSSE3-NEXT: pxor %xmm5, %xmm5
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm5
+; SSE2-SSSE3-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm0, %xmm4
+; SSE2-SSSE3-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: leal (%rcx,%rax,2), %eax
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: leal (%rax,%rcx,4), %eax
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: leal (%rax,%rcx,8), %eax
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: shll $4, %ecx
+; SSE2-SSSE3-NEXT: orl %eax, %ecx
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: shll $5, %eax
+; SSE2-SSSE3-NEXT: orl %ecx, %eax
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: shll $6, %ecx
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-SSSE3-NEXT: andl $1, %edx
+; SSE2-SSSE3-NEXT: shll $7, %edx
+; SSE2-SSSE3-NEXT: orl %ecx, %edx
+;
SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $8, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $9, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $10, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $11, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $12, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $13, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $14, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: shll $15, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: orl %eax, %edx +; SSE2-SSSE3-NEXT: movw %dx, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rcx,%rax,2), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rax,%rcx,4), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rax,%rcx,8), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $4, %ecx +; SSE2-SSSE3-NEXT: orl %eax, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: shll $5, %eax +; SSE2-SSSE3-NEXT: orl %ecx, %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $6, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $7, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $8, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $9, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $10, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $11, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $12, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $13, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $14, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: shll $15, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; 
SSE2-SSSE3-NEXT: orl %eax, %edx +; SSE2-SSSE3-NEXT: movw %dx, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rcx,%rax,2), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rax,%rcx,4), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rax,%rcx,8), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $4, %ecx +; SSE2-SSSE3-NEXT: orl %eax, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: shll $5, %eax +; SSE2-SSSE3-NEXT: orl %ecx, %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $6, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $7, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $8, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $9, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $10, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $11, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $12, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $13, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $14, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: shll $15, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: orl %eax, %edx +; SSE2-SSSE3-NEXT: movw %dx, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rcx,%rax,2), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rax,%rcx,4), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: leal (%rax,%rcx,8), %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $4, %ecx +; SSE2-SSSE3-NEXT: orl %eax, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: shll $5, %eax +; SSE2-SSSE3-NEXT: orl %ecx, %eax +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $6, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $7, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl 
$1, %ecx +; SSE2-SSSE3-NEXT: shll $8, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $9, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $10, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $11, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $12, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: andl $1, %edx +; SSE2-SSSE3-NEXT: shll $13, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: shll $14, %ecx +; SSE2-SSSE3-NEXT: orl %edx, %ecx +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-SSSE3-NEXT: shll $15, %edx +; SSE2-SSSE3-NEXT: orl %ecx, %edx +; SSE2-SSSE3-NEXT: orl %eax, %edx +; SSE2-SSSE3-NEXT: movw %dx, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE2-SSSE3-NEXT: movd %xmm0, %ecx +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1] +; SSE2-SSSE3-NEXT: movd %xmm0, %eax +; SSE2-SSSE3-NEXT: addl %ecx, %eax ; SSE2-SSSE3-NEXT: retq ; -; AVX12-LABEL: bitcast_v64i8_to_v2i32: -; AVX12: # %bb.0: -; AVX12-NEXT: retq +; AVX1-LABEL: bitcast_v64i8_to_v2i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm3 +; AVX1-NEXT: vpextrb $1, %xmm3, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpextrb $0, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: leal (%rcx,%rax,2), %eax +; AVX1-NEXT: vpextrb $2, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: leal (%rax,%rcx,4), %eax +; AVX1-NEXT: vpextrb $3, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: leal (%rax,%rcx,8), %eax +; AVX1-NEXT: vpextrb $4, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $4, %ecx +; AVX1-NEXT: orl %eax, %ecx +; AVX1-NEXT: vpextrb $5, %xmm3, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: shll $5, %eax +; AVX1-NEXT: orl %ecx, %eax +; AVX1-NEXT: vpextrb $6, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $6, %ecx +; AVX1-NEXT: vpextrb $7, %xmm3, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $7, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $8, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $8, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $9, %xmm3, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $9, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $10, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $10, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $11, %xmm3, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $11, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $12, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $12, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $13, %xmm3, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $13, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $14, %xmm3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $14, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $15, %xmm3, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $15, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vextractf128 $1, %ymm1, 
%xmm1 +; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpextrb $0, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $16, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $1, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $17, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $2, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $18, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $3, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $19, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $4, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $20, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $5, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $21, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $6, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $22, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $7, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $23, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $8, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $24, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $9, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $25, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $10, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $26, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $11, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $27, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $12, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $28, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $13, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $29, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $14, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $30, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $15, %xmm1, %edx +; AVX1-NEXT: shll $31, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: orl %eax, %edx +; AVX1-NEXT: movl %edx, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm1 +; AVX1-NEXT: vpextrb $1, %xmm1, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpextrb $0, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: leal (%rcx,%rax,2), %eax +; AVX1-NEXT: vpextrb $2, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: leal (%rax,%rcx,4), %eax +; AVX1-NEXT: vpextrb $3, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: leal (%rax,%rcx,8), %eax +; AVX1-NEXT: vpextrb $4, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $4, %ecx +; AVX1-NEXT: orl %eax, %ecx +; AVX1-NEXT: vpextrb $5, %xmm1, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: shll $5, %eax +; AVX1-NEXT: orl %ecx, %eax +; AVX1-NEXT: vpextrb $6, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $6, %ecx +; AVX1-NEXT: vpextrb $7, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $7, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $8, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $8, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $9, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $9, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $10, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $10, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $11, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $11, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $12, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll 
$12, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $13, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $13, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $14, %xmm1, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $14, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $15, %xmm1, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $15, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpextrb $0, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $16, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $1, %xmm0, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $17, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $2, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $18, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $3, %xmm0, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $19, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $4, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $20, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $5, %xmm0, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $21, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $6, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $22, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $7, %xmm0, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $23, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $8, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $24, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $9, %xmm0, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $25, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $10, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $26, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $11, %xmm0, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $27, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $12, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $28, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $13, %xmm0, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: shll $29, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpextrb $14, %xmm0, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: shll $30, %ecx +; AVX1-NEXT: orl %edx, %ecx +; AVX1-NEXT: vpextrb $15, %xmm0, %edx +; AVX1-NEXT: shll $31, %edx +; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: orl %eax, %edx +; AVX1-NEXT: movl %edx, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX1-NEXT: vmovd %xmm0, %ecx +; AVX1-NEXT: vpextrd $1, %xmm0, %eax +; AVX1-NEXT: addl %ecx, %eax +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: bitcast_v64i8_to_v2i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpgtb %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpextrb $1, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpextrb $0, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: leal (%rcx,%rax,2), %eax +; AVX2-NEXT: vpextrb $2, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: leal (%rax,%rcx,4), %eax +; AVX2-NEXT: vpextrb $3, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: leal (%rax,%rcx,8), %eax +; AVX2-NEXT: vpextrb $4, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $4, %ecx +; AVX2-NEXT: orl %eax, %ecx +; AVX2-NEXT: vpextrb $5, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: shll $5, %eax +; AVX2-NEXT: orl %ecx, %eax +; AVX2-NEXT: vpextrb $6, %xmm1, %ecx +; AVX2-NEXT: andl 
$1, %ecx +; AVX2-NEXT: shll $6, %ecx +; AVX2-NEXT: vpextrb $7, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $7, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $8, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $8, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $9, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $9, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $10, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $10, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $11, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $11, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $12, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $12, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $13, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $13, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $14, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $14, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $15, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $15, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vpextrb $0, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $16, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $1, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $17, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $2, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $18, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $3, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $19, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $4, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $20, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $5, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $21, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $6, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $22, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $7, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $23, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $8, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $24, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $9, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $25, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $10, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $26, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $11, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $27, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $12, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $28, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $13, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $29, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $14, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $30, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $15, %xmm1, %edx +; AVX2-NEXT: shll $31, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: orl %eax, %edx +; AVX2-NEXT: movl %edx, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vpextrb $1, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpextrb $0, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: leal (%rcx,%rax,2), %eax +; AVX2-NEXT: vpextrb $2, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: leal (%rax,%rcx,4), %eax 
+; AVX2-NEXT: vpextrb $3, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: leal (%rax,%rcx,8), %eax +; AVX2-NEXT: vpextrb $4, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $4, %ecx +; AVX2-NEXT: orl %eax, %ecx +; AVX2-NEXT: vpextrb $5, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: shll $5, %eax +; AVX2-NEXT: orl %ecx, %eax +; AVX2-NEXT: vpextrb $6, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $6, %ecx +; AVX2-NEXT: vpextrb $7, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $7, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $8, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $8, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $9, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $9, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $10, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $10, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $11, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $11, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $12, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $12, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $13, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $13, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $14, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $14, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $15, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $15, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vpextrb $0, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $16, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $1, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $17, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $2, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $18, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $3, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $19, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $4, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $20, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $5, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $21, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $6, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $22, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $7, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $23, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $8, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $24, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $9, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $25, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $10, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $26, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $11, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $27, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $12, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $28, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $13, %xmm0, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: shll $29, %edx +; AVX2-NEXT: orl %ecx, %edx +; AVX2-NEXT: vpextrb $14, %xmm0, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: shll $30, %ecx +; AVX2-NEXT: orl %edx, %ecx +; AVX2-NEXT: vpextrb $15, %xmm0, %edx +; AVX2-NEXT: shll $31, %edx +; AVX2-NEXT: 
orl %ecx, %edx +; AVX2-NEXT: orl %eax, %edx +; AVX2-NEXT: movl %edx, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX2-NEXT: vmovd %xmm0, %ecx +; AVX2-NEXT: vpextrd $1, %xmm0, %eax +; AVX2-NEXT: addl %ecx, %eax +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; AVX512-LABEL: bitcast_v64i8_to_v2i32: ; AVX512: # %bb.0: Index: llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll +++ llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll @@ -1579,26 +1579,22 @@ define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind { ; ALL-LABEL: cvt_4f32_to_4i16: ; ALL: # %bb.0: -; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; ALL-NEXT: vmovd %xmm1, %eax -; ALL-NEXT: shll $16, %eax ; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; ALL-NEXT: vmovd %xmm1, %ecx -; ALL-NEXT: movzwl %cx, %ecx -; ALL-NEXT: orl %eax, %ecx +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) ; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] ; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; ALL-NEXT: vmovd %xmm1, %eax -; ALL-NEXT: shll $16, %eax -; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] ; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; ALL-NEXT: vmovd %xmm0, %edx -; ALL-NEXT: movzwl %dx, %edx -; ALL-NEXT: orl %eax, %edx -; ALL-NEXT: shlq $32, %rdx -; ALL-NEXT: orq %rcx, %rdx -; ALL-NEXT: vmovq %rdx, %xmm0 +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero ; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> @@ -1608,27 +1604,22 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind { ; ALL-LABEL: cvt_4f32_to_8i16_undef: ; ALL: # %bb.0: -; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; ALL-NEXT: vmovd %xmm1, %eax -; ALL-NEXT: shll $16, %eax ; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; ALL-NEXT: vmovd %xmm1, %ecx -; ALL-NEXT: movzwl %cx, %ecx -; ALL-NEXT: orl %eax, %ecx +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) ; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] ; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; ALL-NEXT: vmovd %xmm1, %eax -; ALL-NEXT: shll $16, %eax -; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] ; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; ALL-NEXT: vmovd %xmm0, %edx -; ALL-NEXT: movzwl %dx, %edx -; ALL-NEXT: orl %eax, %edx -; ALL-NEXT: shlq $32, %rdx -; ALL-NEXT: orq %rcx, %rdx -; ALL-NEXT: vmovq %rdx, %xmm0 -; ALL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> @@ -1637,133 +1628,25 @@ } define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind { -; 
AVX1-LABEL: cvt_4f32_to_8i16_zero: -; AVX1: # %bb.0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movzwl %cx, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %edx -; AVX1-NEXT: movzwl %dx, %edx -; AVX1-NEXT: orl %eax, %edx -; AVX1-NEXT: shlq $32, %rdx -; AVX1-NEXT: orq %rcx, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm0 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero -; AVX1-NEXT: retq -; -; AVX2-SLOW-LABEL: cvt_4f32_to_8i16_zero: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-SLOW-NEXT: vmovd %xmm1, %eax -; AVX2-SLOW-NEXT: shll $16, %eax -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-SLOW-NEXT: vmovd %xmm1, %ecx -; AVX2-SLOW-NEXT: movzwl %cx, %ecx -; AVX2-SLOW-NEXT: orl %eax, %ecx -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-SLOW-NEXT: vmovd %xmm1, %eax -; AVX2-SLOW-NEXT: shll $16, %eax -; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-SLOW-NEXT: vmovd %xmm0, %edx -; AVX2-SLOW-NEXT: movzwl %dx, %edx -; AVX2-SLOW-NEXT: orl %eax, %edx -; AVX2-SLOW-NEXT: shlq $32, %rdx -; AVX2-SLOW-NEXT: orq %rcx, %rdx -; AVX2-SLOW-NEXT: vmovq %rdx, %xmm0 -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-LABEL: cvt_4f32_to_8i16_zero: -; AVX2-FAST: # %bb.0: -; AVX2-FAST-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-FAST-NEXT: vmovd %xmm1, %eax -; AVX2-FAST-NEXT: shll $16, %eax -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-FAST-NEXT: vmovd %xmm1, %ecx -; AVX2-FAST-NEXT: movzwl %cx, %ecx -; AVX2-FAST-NEXT: orl %eax, %ecx -; AVX2-FAST-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-FAST-NEXT: vmovd %xmm1, %eax -; AVX2-FAST-NEXT: shll $16, %eax -; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-FAST-NEXT: vmovd %xmm0, %edx -; AVX2-FAST-NEXT: movzwl %dx, %edx -; AVX2-FAST-NEXT: orl %eax, %edx -; AVX2-FAST-NEXT: shlq $32, %rdx -; AVX2-FAST-NEXT: orq %rcx, %rdx -; AVX2-FAST-NEXT: vmovq %rdx, %xmm0 -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-NEXT: retq -; -; AVX512F-LABEL: cvt_4f32_to_8i16_zero: -; AVX512F: # %bb.0: -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: shll $16, %eax -; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: movzwl %cx, %ecx -; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: shll $16, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; 
AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: movzwl %dx, %edx -; AVX512F-NEXT: orl %eax, %edx -; AVX512F-NEXT: shlq $32, %rdx -; AVX512F-NEXT: orq %rcx, %rdx -; AVX512F-NEXT: vmovq %rdx, %xmm0 -; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_4f32_to_8i16_zero: -; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: movzwl %cx, %ecx -; AVX512VL-NEXT: orl %eax, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %edx -; AVX512VL-NEXT: movzwl %dx, %edx -; AVX512VL-NEXT: orl %eax, %edx -; AVX512VL-NEXT: shlq $32, %rdx -; AVX512VL-NEXT: orq %rcx, %rdx -; AVX512VL-NEXT: vmovq %rdx, %xmm0 -; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero -; AVX512VL-NEXT: retq +; ALL-LABEL: cvt_4f32_to_8i16_zero: +; ALL: # %bb.0: +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> @@ -2074,28 +1957,23 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounwind { ; ALL-LABEL: store_cvt_4f32_to_8i16_undef: ; ALL: # %bb.0: -; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; ALL-NEXT: vmovd %xmm1, %eax -; ALL-NEXT: shll $16, %eax ; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; ALL-NEXT: vmovd %xmm1, %ecx -; ALL-NEXT: movzwl %cx, %ecx -; ALL-NEXT: orl %eax, %ecx +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) ; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] ; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; ALL-NEXT: vmovd %xmm1, %eax -; ALL-NEXT: shll $16, %eax -; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] ; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; ALL-NEXT: vmovd %xmm0, %edx -; ALL-NEXT: movzwl %dx, %edx -; ALL-NEXT: orl %eax, %edx -; ALL-NEXT: shlq $32, %rdx -; ALL-NEXT: orq %rcx, %rdx -; ALL-NEXT: vmovq %rdx, %xmm0 -; ALL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; ALL-NEXT: vmovdqa %xmm0, (%rdi) +; ALL-NEXT: vmovd 
%xmm0, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; ALL-NEXT: vmovaps %xmm0, (%rdi) ; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> @@ -2105,138 +1983,26 @@ } define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwind { -; AVX1-LABEL: store_cvt_4f32_to_8i16_zero: -; AVX1: # %bb.0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movzwl %cx, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %edx -; AVX1-NEXT: movzwl %dx, %edx -; AVX1-NEXT: orl %eax, %edx -; AVX1-NEXT: shlq $32, %rdx -; AVX1-NEXT: orq %rcx, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm0 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero -; AVX1-NEXT: vmovdqa %xmm0, (%rdi) -; AVX1-NEXT: retq -; -; AVX2-SLOW-LABEL: store_cvt_4f32_to_8i16_zero: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-SLOW-NEXT: vmovd %xmm1, %eax -; AVX2-SLOW-NEXT: shll $16, %eax -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-SLOW-NEXT: vmovd %xmm1, %ecx -; AVX2-SLOW-NEXT: movzwl %cx, %ecx -; AVX2-SLOW-NEXT: orl %eax, %ecx -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-SLOW-NEXT: vmovd %xmm1, %eax -; AVX2-SLOW-NEXT: shll $16, %eax -; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-SLOW-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-SLOW-NEXT: vmovd %xmm0, %edx -; AVX2-SLOW-NEXT: movzwl %dx, %edx -; AVX2-SLOW-NEXT: orl %eax, %edx -; AVX2-SLOW-NEXT: shlq $32, %rdx -; AVX2-SLOW-NEXT: orq %rcx, %rdx -; AVX2-SLOW-NEXT: vmovq %rdx, %xmm0 -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero -; AVX2-SLOW-NEXT: vmovdqa %xmm0, (%rdi) -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-LABEL: store_cvt_4f32_to_8i16_zero: -; AVX2-FAST: # %bb.0: -; AVX2-FAST-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-FAST-NEXT: vmovd %xmm1, %eax -; AVX2-FAST-NEXT: shll $16, %eax -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-FAST-NEXT: vmovd %xmm1, %ecx -; AVX2-FAST-NEXT: movzwl %cx, %ecx -; AVX2-FAST-NEXT: orl %eax, %ecx -; AVX2-FAST-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-FAST-NEXT: vmovd %xmm1, %eax -; AVX2-FAST-NEXT: shll $16, %eax -; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-FAST-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-FAST-NEXT: vmovd %xmm0, %edx -; AVX2-FAST-NEXT: movzwl %dx, %edx -; AVX2-FAST-NEXT: orl %eax, %edx -; AVX2-FAST-NEXT: shlq $32, %rdx -; AVX2-FAST-NEXT: orq %rcx, %rdx -; AVX2-FAST-NEXT: vmovq %rdx, %xmm0 -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-NEXT: vmovdqa %xmm0, (%rdi) -; AVX2-FAST-NEXT: retq -; -; AVX512F-LABEL: store_cvt_4f32_to_8i16_zero: -; AVX512F: # %bb.0: -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = 
xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: shll $16, %eax -; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: movzwl %cx, %ecx -; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: shll $16, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: movzwl %dx, %edx -; AVX512F-NEXT: orl %eax, %edx -; AVX512F-NEXT: shlq $32, %rdx -; AVX512F-NEXT: orq %rcx, %rdx -; AVX512F-NEXT: vmovq %rdx, %xmm0 -; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero -; AVX512F-NEXT: vmovdqa %xmm0, (%rdi) -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: store_cvt_4f32_to_8i16_zero: -; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: movzwl %cx, %ecx -; AVX512VL-NEXT: orl %eax, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %edx -; AVX512VL-NEXT: movzwl %dx, %edx -; AVX512VL-NEXT: orl %eax, %edx -; AVX512VL-NEXT: shlq $32, %rdx -; AVX512VL-NEXT: orq %rcx, %rdx -; AVX512VL-NEXT: vmovq %rdx, %xmm0 -; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero -; AVX512VL-NEXT: vmovdqa %xmm0, (%rdi) -; AVX512VL-NEXT: retq +; ALL-LABEL: store_cvt_4f32_to_8i16_zero: +; ALL: # %bb.0: +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; ALL-NEXT: vmovaps %xmm0, (%rdi) +; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> @@ -2510,20 +2276,16 @@ define <2 x i16> @cvt_2f64_to_2i16(<2 x double> %a0) nounwind { ; ALL-LABEL: cvt_2f64_to_2i16: ; ALL: # %bb.0: -; ALL-NEXT: pushq %rbx -; ALL-NEXT: subq $16, %rsp -; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill -; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: subq $40, %rsp +; ALL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; ALL-NEXT: callq __truncdfhf2 -; ALL-NEXT: movl %eax, %ebx -; ALL-NEXT: shll $16, %ebx -; ALL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; ALL-NEXT: # xmm0 = mem[1,0] ; 
ALL-NEXT: callq __truncdfhf2 -; ALL-NEXT: movzwl %ax, %eax -; ALL-NEXT: orl %ebx, %eax -; ALL-NEXT: vmovd %eax, %xmm0 -; ALL-NEXT: addq $16, %rsp -; ALL-NEXT: popq %rbx +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero +; ALL-NEXT: addq $40, %rsp ; ALL-NEXT: retq %1 = fptrunc <2 x double> %a0 to <2 x half> %2 = bitcast <2 x half> %1 to <2 x i16> @@ -2531,23 +2293,145 @@ } define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind { -; AVX1-LABEL: cvt_4f64_to_4i16: +; ALL-LABEL: cvt_4f64_to_4i16: +; ALL: # %bb.0: +; ALL-NEXT: subq $88, %rsp +; ALL-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0 +; ALL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; ALL-NEXT: vzeroupper +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; ALL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 +; ALL-NEXT: vzeroupper +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; ALL-NEXT: # xmm0 = mem[1,0] +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; ALL-NEXT: # xmm0 = mem[1,0] +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; ALL-NEXT: addq $88, %rsp +; ALL-NEXT: retq + %1 = fptrunc <4 x double> %a0 to <4 x half> + %2 = bitcast <4 x half> %1 to <4 x i16> + ret <4 x i16> %2 +} + +define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind { +; ALL-LABEL: cvt_4f64_to_8i16_undef: +; ALL: # %bb.0: +; ALL-NEXT: subq $88, %rsp +; ALL-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0 +; ALL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; ALL-NEXT: vzeroupper +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; ALL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 +; ALL-NEXT: vzeroupper +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; ALL-NEXT: # xmm0 = mem[1,0] +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; ALL-NEXT: # xmm0 = mem[1,0] +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; ALL-NEXT: addq $88, %rsp +; ALL-NEXT: retq + %1 = fptrunc <4 x double> %a0 to <4 x half> + %2 = bitcast <4 x half> %1 to <4 x i16> + %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> + ret <8 x i16> %3 +} + +define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind { +; ALL-LABEL: cvt_4f64_to_8i16_zero: +; ALL: # %bb.0: +; ALL-NEXT: subq $88, %rsp +; ALL-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0 +; ALL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; ALL-NEXT: vzeroupper +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; ALL-NEXT: # kill: 
def $xmm0 killed $xmm0 killed $ymm0 +; ALL-NEXT: vzeroupper +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; ALL-NEXT: # xmm0 = mem[1,0] +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; ALL-NEXT: # xmm0 = mem[1,0] +; ALL-NEXT: callq __truncdfhf2 +; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp) +; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; ALL-NEXT: addq $88, %rsp +; ALL-NEXT: retq + %1 = fptrunc <4 x double> %a0 to <4 x half> + %2 = bitcast <4 x half> %1 to <4 x i16> + %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> + ret <8 x i16> %3 +} + +define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind { +; AVX1-LABEL: cvt_8f64_to_8i16: ; AVX1: # %bb.0: +; AVX1-NEXT: pushq %r15 ; AVX1-NEXT: pushq %r14 ; AVX1-NEXT: pushq %rbx -; AVX1-NEXT: subq $40, %rsp -; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill +; AVX1-NEXT: subq $64, %rsp +; AVX1-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill +; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: callq __truncdfhf2 ; AVX1-NEXT: movl %eax, %ebx ; AVX1-NEXT: shll $16, %ebx -; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: callq __truncdfhf2 +; AVX1-NEXT: movzwl %ax, %r15d +; AVX1-NEXT: orl %ebx, %r15d +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: callq __truncdfhf2 +; AVX1-NEXT: movl %eax, %ebx +; AVX1-NEXT: shll $16, %ebx +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; AVX1-NEXT: callq __truncdfhf2 ; AVX1-NEXT: movzwl %ax, %r14d ; AVX1-NEXT: orl %ebx, %r14d +; AVX1-NEXT: shlq $32, %r14 +; AVX1-NEXT: orq %r15, %r14 +; AVX1-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload +; AVX1-NEXT: # xmm0 = mem[1,0] +; AVX1-NEXT: callq __truncdfhf2 +; AVX1-NEXT: movl %eax, %ebx +; AVX1-NEXT: shll $16, %ebx +; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: callq __truncdfhf2 +; AVX1-NEXT: movzwl %ax, %r15d +; AVX1-NEXT: orl %ebx, %r15d ; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 ; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill @@ -2561,30 +2445,60 @@ ; AVX1-NEXT: movzwl %ax, %eax ; AVX1-NEXT: orl %ebx, %eax ; AVX1-NEXT: shlq $32, %rax -; AVX1-NEXT: orq %r14, %rax +; AVX1-NEXT: orq %r15, %rax ; AVX1-NEXT: vmovq %rax, %xmm0 -; AVX1-NEXT: addq $40, %rsp +; AVX1-NEXT: vmovq %r14, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-NEXT: addq $64, %rsp ; AVX1-NEXT: popq %rbx ; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 ; AVX1-NEXT: retq ; -; AVX2-LABEL: cvt_4f64_to_4i16: +; AVX2-LABEL: cvt_8f64_to_8i16: ; AVX2: # %bb.0: +; AVX2-NEXT: pushq %r15 ; AVX2-NEXT: pushq %r14 ; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: subq $40, %rsp -; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: vmovups %ymm1, (%rsp) # 32-byte 
Spill +; AVX2-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: callq __truncdfhf2 ; AVX2-NEXT: movl %eax, %ebx ; AVX2-NEXT: shll $16, %ebx -; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload +; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: callq __truncdfhf2 +; AVX2-NEXT: movzwl %ax, %r15d +; AVX2-NEXT: orl %ebx, %r15d +; AVX2-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: callq __truncdfhf2 +; AVX2-NEXT: movl %eax, %ebx +; AVX2-NEXT: shll $16, %ebx +; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; AVX2-NEXT: callq __truncdfhf2 ; AVX2-NEXT: movzwl %ax, %r14d ; AVX2-NEXT: orl %ebx, %r14d +; AVX2-NEXT: shlq $32, %r14 +; AVX2-NEXT: orq %r15, %r14 +; AVX2-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload +; AVX2-NEXT: # xmm0 = mem[1,0] +; AVX2-NEXT: callq __truncdfhf2 +; AVX2-NEXT: movl %eax, %ebx +; AVX2-NEXT: shll $16, %ebx +; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: callq __truncdfhf2 +; AVX2-NEXT: movzwl %ax, %r15d +; AVX2-NEXT: orl %ebx, %r15d ; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload ; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 ; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill @@ -2598,515 +2512,23 @@ ; AVX2-NEXT: movzwl %ax, %eax ; AVX2-NEXT: orl %ebx, %eax ; AVX2-NEXT: shlq $32, %rax -; AVX2-NEXT: orq %r14, %rax +; AVX2-NEXT: orq %r15, %rax ; AVX2-NEXT: vmovq %rax, %xmm0 -; AVX2-NEXT: addq $40, %rsp +; AVX2-NEXT: vmovq %r14, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-NEXT: addq $64, %rsp ; AVX2-NEXT: popq %rbx ; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 ; AVX2-NEXT: retq ; -; AVX512-LABEL: cvt_4f64_to_4i16: +; AVX512-LABEL: cvt_8f64_to_8i16: ; AVX512: # %bb.0: +; AVX512-NEXT: pushq %r15 ; AVX512-NEXT: pushq %r14 ; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: subq $40, %rsp -; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill -; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: callq __truncdfhf2 -; AVX512-NEXT: movl %eax, %ebx -; AVX512-NEXT: shll $16, %ebx -; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: callq __truncdfhf2 -; AVX512-NEXT: movzwl %ax, %r14d -; AVX512-NEXT: orl %ebx, %r14d -; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload -; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill -; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: callq __truncdfhf2 -; AVX512-NEXT: movl %eax, %ebx -; AVX512-NEXT: shll $16, %ebx -; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload -; AVX512-NEXT: callq __truncdfhf2 -; AVX512-NEXT: movzwl %ax, %eax -; AVX512-NEXT: orl %ebx, %eax -; AVX512-NEXT: shlq $32, %rax -; AVX512-NEXT: orq %r14, %rax -; AVX512-NEXT: vmovq %rax, %xmm0 -; AVX512-NEXT: addq $40, %rsp -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: retq - %1 = fptrunc <4 x double> %a0 to <4 x half> - %2 = bitcast <4 x half> %1 to 
<4 x i16> - ret <4 x i16> %2 -} - -define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind { -; AVX1-LABEL: cvt_4f64_to_8i16_undef: -; AVX1: # %bb.0: -; AVX1-NEXT: pushq %r14 -; AVX1-NEXT: pushq %rbx -; AVX1-NEXT: subq $40, %rsp -; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: callq __truncdfhf2 -; AVX1-NEXT: movl %eax, %ebx -; AVX1-NEXT: shll $16, %ebx -; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: callq __truncdfhf2 -; AVX1-NEXT: movzwl %ax, %r14d -; AVX1-NEXT: orl %ebx, %r14d -; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: callq __truncdfhf2 -; AVX1-NEXT: movl %eax, %ebx -; AVX1-NEXT: shll $16, %ebx -; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload -; AVX1-NEXT: callq __truncdfhf2 -; AVX1-NEXT: movzwl %ax, %eax -; AVX1-NEXT: orl %ebx, %eax -; AVX1-NEXT: shlq $32, %rax -; AVX1-NEXT: orq %r14, %rax -; AVX1-NEXT: vmovq %rax, %xmm0 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX1-NEXT: addq $40, %rsp -; AVX1-NEXT: popq %rbx -; AVX1-NEXT: popq %r14 -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_4f64_to_8i16_undef: -; AVX2: # %bb.0: -; AVX2-NEXT: pushq %r14 -; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: subq $40, %rsp -; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill -; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: callq __truncdfhf2 -; AVX2-NEXT: movl %eax, %ebx -; AVX2-NEXT: shll $16, %ebx -; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: callq __truncdfhf2 -; AVX2-NEXT: movzwl %ax, %r14d -; AVX2-NEXT: orl %ebx, %r14d -; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload -; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill -; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: callq __truncdfhf2 -; AVX2-NEXT: movl %eax, %ebx -; AVX2-NEXT: shll $16, %ebx -; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload -; AVX2-NEXT: callq __truncdfhf2 -; AVX2-NEXT: movzwl %ax, %eax -; AVX2-NEXT: orl %ebx, %eax -; AVX2-NEXT: shlq $32, %rax -; AVX2-NEXT: orq %r14, %rax -; AVX2-NEXT: vmovq %rax, %xmm0 -; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX2-NEXT: addq $40, %rsp -; AVX2-NEXT: popq %rbx -; AVX2-NEXT: popq %r14 -; AVX2-NEXT: retq -; -; AVX512-LABEL: cvt_4f64_to_8i16_undef: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: subq $40, %rsp -; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill -; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: callq __truncdfhf2 -; AVX512-NEXT: movl %eax, %ebx -; AVX512-NEXT: shll $16, %ebx -; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: callq __truncdfhf2 -; AVX512-NEXT: movzwl %ax, %r14d -; AVX512-NEXT: orl %ebx, %r14d -; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload -; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill -; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512-NEXT: vzeroupper 
-; AVX512-NEXT: callq __truncdfhf2
-; AVX512-NEXT: movl %eax, %ebx
-; AVX512-NEXT: shll $16, %ebx
-; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX512-NEXT: callq __truncdfhf2
-; AVX512-NEXT: movzwl %ax, %eax
-; AVX512-NEXT: orl %ebx, %eax
-; AVX512-NEXT: shlq $32, %rax
-; AVX512-NEXT: orq %r14, %rax
-; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: addq $40, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: retq
- %1 = fptrunc <4 x double> %a0 to <4 x half>
- %2 = bitcast <4 x half> %1 to <4 x i16>
- %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
- ret <8 x i16> %3
-}
-
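Every deleted body above follows the same scalarized pattern: each f64 lane is fed to the __truncdfhf2 libcall (which returns the f16 bit pattern in the low 16 bits of %eax), and the four 16-bit results are packed into one 64-bit GPR by the shll $16/orl and shlq $32/orq pairs before a single vmovq rebuilds the vector. A minimal C++ sketch of that packing order follows; pack_halves and h0..h3 are illustrative names standing in for the four libcall results, not anything in the test:

#include <cstdint>

// h0..h3 are the 16-bit __truncdfhf2 results for f64 lanes 0..3.
// Lane 0 ends up in bits [15:0] and lane 3 in bits [63:48], matching
// the vmovq operand built by the deleted CHECK lines.
uint64_t pack_halves(uint16_t h0, uint16_t h1, uint16_t h2, uint16_t h3) {
  uint32_t lo = (uint32_t(h1) << 16) | h0; // shll $16, %ebx; movzwl; orl
  uint32_t hi = (uint32_t(h3) << 16) | h2; // shll $16, %ebx; movzwl; orl
  return (uint64_t(hi) << 32) | lo;        // shlq $32, %rax; orq
}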
-define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
-; AVX1-LABEL: cvt_4f64_to_8i16_zero:
-; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: subq $40, %rsp
-; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebx
-; AVX1-NEXT: shll $16, %ebx
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %r14d
-; AVX1-NEXT: orl %ebx, %r14d
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebx
-; AVX1-NEXT: shll $16, %ebx
-; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: orl %ebx, %eax
-; AVX1-NEXT: shlq $32, %rax
-; AVX1-NEXT: orq %r14, %rax
-; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX1-NEXT: addq $40, %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: retq
-;
-; AVX2-SLOW-LABEL: cvt_4f64_to_8i16_zero:
-; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: pushq %r14
-; AVX2-SLOW-NEXT: pushq %rbx
-; AVX2-SLOW-NEXT: subq $40, %rsp
-; AVX2-SLOW-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-SLOW-NEXT: vzeroupper
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movl %eax, %ebx
-; AVX2-SLOW-NEXT: shll $16, %ebx
-; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-SLOW-NEXT: vzeroupper
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movzwl %ax, %r14d
-; AVX2-SLOW-NEXT: orl %ebx, %r14d
-; AVX2-SLOW-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX2-SLOW-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-SLOW-NEXT: vzeroupper
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movl %eax, %ebx
-; AVX2-SLOW-NEXT: shll $16, %ebx
-; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movzwl %ax, %eax
-; AVX2-SLOW-NEXT: orl %ebx, %eax
-; AVX2-SLOW-NEXT: shlq $32, %rax
-; AVX2-SLOW-NEXT: orq %r14, %rax
-; AVX2-SLOW-NEXT: vmovq %rax, %xmm0
-; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX2-SLOW-NEXT: addq $40, %rsp
-; AVX2-SLOW-NEXT: popq %rbx
-; AVX2-SLOW-NEXT: popq %r14
-; AVX2-SLOW-NEXT: retq
-;
-; AVX2-FAST-LABEL: cvt_4f64_to_8i16_zero:
-; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: pushq %r14
-; AVX2-FAST-NEXT: pushq %rbx
-; AVX2-FAST-NEXT: subq $40, %rsp
-; AVX2-FAST-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-FAST-NEXT: vzeroupper
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movl %eax, %ebx
-; AVX2-FAST-NEXT: shll $16, %ebx
-; AVX2-FAST-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-FAST-NEXT: vzeroupper
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movzwl %ax, %r14d
-; AVX2-FAST-NEXT: orl %ebx, %r14d
-; AVX2-FAST-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX2-FAST-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-FAST-NEXT: vzeroupper
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movl %eax, %ebx
-; AVX2-FAST-NEXT: shll $16, %ebx
-; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movzwl %ax, %eax
-; AVX2-FAST-NEXT: orl %ebx, %eax
-; AVX2-FAST-NEXT: shlq $32, %rax
-; AVX2-FAST-NEXT: orq %r14, %rax
-; AVX2-FAST-NEXT: vmovq %rax, %xmm0
-; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-NEXT: addq $40, %rsp
-; AVX2-FAST-NEXT: popq %rbx
-; AVX2-FAST-NEXT: popq %r14
-; AVX2-FAST-NEXT: retq
-;
-; AVX512F-LABEL: cvt_4f64_to_8i16_zero:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: pushq %r14
-; AVX512F-NEXT: pushq %rbx
-; AVX512F-NEXT: subq $40, %rsp
-; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movl %eax, %ebx
-; AVX512F-NEXT: shll $16, %ebx
-; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movzwl %ax, %r14d
-; AVX512F-NEXT: orl %ebx, %r14d
-; AVX512F-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movl %eax, %ebx
-; AVX512F-NEXT: shll $16, %ebx
-; AVX512F-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: orl %ebx, %eax
-; AVX512F-NEXT: shlq $32, %rax
-; AVX512F-NEXT: orq %r14, %rax
-; AVX512F-NEXT: vmovq %rax, %xmm0
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX512F-NEXT: addq $40, %rsp
-; AVX512F-NEXT: popq %rbx
-; AVX512F-NEXT: popq %r14
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: cvt_4f64_to_8i16_zero:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: pushq %r14
-; AVX512VL-NEXT: pushq %rbx
-; AVX512VL-NEXT: subq $40, %rsp
-; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movl %eax, %ebx
-; AVX512VL-NEXT: shll $16, %ebx
-; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movzwl %ax, %r14d
-; AVX512VL-NEXT: orl %ebx, %r14d
-; AVX512VL-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512VL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movl %eax, %ebx
-; AVX512VL-NEXT: shll $16, %ebx
-; AVX512VL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: orl %ebx, %eax
-; AVX512VL-NEXT: shlq $32, %rax
-; AVX512VL-NEXT: orq %r14, %rax
-; AVX512VL-NEXT: vmovq %rax, %xmm0
-; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512VL-NEXT: addq $40, %rsp
-; AVX512VL-NEXT: popq %rbx
-; AVX512VL-NEXT: popq %r14
-; AVX512VL-NEXT: retq
- %1 = fptrunc <4 x double> %a0 to <4 x half>
- %2 = bitcast <4 x half> %1 to <4 x i16>
- %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %3
-}
-
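The _zero variant differs from the _undef variant only in where the widened upper lanes come from: zeroinitializer forces an explicit zeroing of the top 64 bits (the trailing vmovq {{.*#+}} xmm0 = xmm0[0],zero, or the vpshufb whose high eight bytes are zero), whereas undef lanes may be left as garbage. A short C++ sketch of the shufflevector semantics, with widen4to8 as an illustrative name only:

#include <array>
#include <cstdint>

// shufflevector <4 x i16> %v, <4 x i16> zeroinitializer,
//               <8 x i32> <i32 0, ..., i32 7>:
// lanes 0..3 come from %v, lanes 4..7 from the zero vector.
std::array<uint16_t, 8> widen4to8(const std::array<uint16_t, 4> &v) {
  return {v[0], v[1], v[2], v[3], 0, 0, 0, 0};
}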
-define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
-; AVX1-LABEL: cvt_8f64_to_8i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
-; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebx
-; AVX1-NEXT: shll $16, %ebx
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %r15d
-; AVX1-NEXT: orl %ebx, %r15d
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebx
-; AVX1-NEXT: shll $16, %ebx
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %r14d
-; AVX1-NEXT: orl %ebx, %r14d
-; AVX1-NEXT: shlq $32, %r14
-; AVX1-NEXT: orq %r15, %r14
-; AVX1-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
-; AVX1-NEXT: # xmm0 = mem[1,0]
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebx
-; AVX1-NEXT: shll $16, %ebx
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %r15d
-; AVX1-NEXT: orl %ebx, %r15d
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebx
-; AVX1-NEXT: shll $16, %ebx
-; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: orl %ebx, %eax
-; AVX1-NEXT: shlq $32, %rax
-; AVX1-NEXT: orq %r15, %rax
-; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vmovq %r14, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: addq $64, %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: cvt_8f64_to_8i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
-; AVX2-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movl %eax, %ebx
-; AVX2-NEXT: shll $16, %ebx
-; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movzwl %ax, %r15d
-; AVX2-NEXT: orl %ebx, %r15d
-; AVX2-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movl %eax, %ebx
-; AVX2-NEXT: shll $16, %ebx
-; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movzwl %ax, %r14d
-; AVX2-NEXT: orl %ebx, %r14d
-; AVX2-NEXT: shlq $32, %r14
-; AVX2-NEXT: orq %r15, %r14
-; AVX2-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
-; AVX2-NEXT: # xmm0 = mem[1,0]
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movl %eax, %ebx
-; AVX2-NEXT: shll $16, %ebx
-; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movzwl %ax, %r15d
-; AVX2-NEXT: orl %ebx, %r15d
-; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movl %eax, %ebx
-; AVX2-NEXT: shll $16, %ebx
-; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: orl %ebx, %eax
-; AVX2-NEXT: shlq $32, %rax
-; AVX2-NEXT: orq %r15, %rax
-; AVX2-NEXT: vmovq %rax, %xmm0
-; AVX2-NEXT: vmovq %r14, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: addq $64, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: cvt_8f64_to_8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: subq $96, %rsp
-; AVX512-NEXT: vmovupd %zmm0, (%rsp) # 64-byte Spill
+; AVX512-NEXT: subq $96, %rsp
+; AVX512-NEXT: vmovupd %zmm0, (%rsp) # 64-byte Spill
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: callq __truncdfhf2
@@ -3338,131 +2760,35 @@
 }
 
 define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) nounwind {
-; AVX1-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: movq %rdi, %r14
-; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebp
-; AVX1-NEXT: shll $16, %ebp
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %ebx
-; AVX1-NEXT: orl %ebp, %ebx
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebp
-; AVX1-NEXT: shll $16, %ebp
-; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: orl %ebp, %eax
-; AVX1-NEXT: shlq $32, %rax
-; AVX1-NEXT: orq %rbx, %rax
-; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX1-NEXT: vmovdqa %xmm0, (%r14)
-; AVX1-NEXT: addq $32, %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %rbp
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: movq %rdi, %r14
-; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movl %eax, %ebp
-; AVX2-NEXT: shll $16, %ebp
-; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movzwl %ax, %ebx
-; AVX2-NEXT: orl %ebp, %ebx
-; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movl %eax, %ebp
-; AVX2-NEXT: shll $16, %ebp
-; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX2-NEXT: callq __truncdfhf2
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: orl %ebp, %eax
-; AVX2-NEXT: shlq $32, %rax
-; AVX2-NEXT: orq %rbx, %rax
-; AVX2-NEXT: vmovq %rax, %xmm0
-; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX2-NEXT: vmovdqa %xmm0, (%r14)
-; AVX2-NEXT: addq $32, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %rbp
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: subq $32, %rsp
-; AVX512-NEXT: movq %rdi, %r14
-; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2
-; AVX512-NEXT: movl %eax, %ebp
-; AVX512-NEXT: shll $16, %ebp
-; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2
-; AVX512-NEXT: movzwl %ax, %ebx
-; AVX512-NEXT: orl %ebp, %ebx
-; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2
-; AVX512-NEXT: movl %eax, %ebp
-; AVX512-NEXT: shll $16, %ebp
-; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX512-NEXT: callq __truncdfhf2
-; AVX512-NEXT: movzwl %ax, %eax
-; AVX512-NEXT: orl %ebp, %eax
-; AVX512-NEXT: shlq $32, %rax
-; AVX512-NEXT: orq %rbx, %rax
-; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vmovdqa %xmm0, (%r14)
-; AVX512-NEXT: addq $32, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: retq
+; ALL-LABEL: store_cvt_4f64_to_8i16_undef:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbx
+; ALL-NEXT: subq $80, %rsp
+; ALL-NEXT: movq %rdi, %rbx
+; ALL-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
+; ALL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; ALL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; ALL-NEXT: # xmm0 = mem[1,0]
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; ALL-NEXT: # xmm0 = mem[1,0]
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovaps %xmm0, (%rbx)
+; ALL-NEXT: addq $80, %rsp
+; ALL-NEXT: popq %rbx
+; ALL-NEXT: retq
 %1 = fptrunc <4 x double> %a0 to <4 x half>
 %2 = bitcast <4 x half> %1 to <4 x i16>
 %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
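With the new shared ALL lowering above, the store forms avoid GPR packing entirely: each __truncdfhf2 result is written to its own 2-byte stack slot with movw, the four slots are reloaded as one 64-bit lane with vmovsd (which also zeroes the upper half of the XMM register), and a single 16-byte vmovaps writes the destination. A C++ sketch of that data movement; store_widened, h, and out are illustrative names, not part of the test:

#include <cstdint>
#include <cstring>

// h[0..3] stand for the four __truncdfhf2 results; out points at the
// <8 x i16> destination (the (%rbx) store in the checks above).
void store_widened(const uint16_t h[4], uint16_t out[8]) {
  uint16_t slots[4] = {h[0], h[1], h[2], h[3]}; // four movw stores
  uint64_t lane;
  std::memcpy(&lane, slots, sizeof(lane)); // vmovsd: one 8-byte reload
  std::memcpy(out, &lane, sizeof(lane));   // low half of the vmovaps
  std::memset(out + 4, 0, 8);              // upper half comes out zero
}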
@@ -3471,218 +2797,35 @@
 }
 
 define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounwind {
-; AVX1-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: movq %rdi, %r14
-; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebp
-; AVX1-NEXT: shll $16, %ebp
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %ebx
-; AVX1-NEXT: orl %ebp, %ebx
-; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movl %eax, %ebp
-; AVX1-NEXT: shll $16, %ebp
-; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT: callq __truncdfhf2
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: orl %ebp, %eax
-; AVX1-NEXT: shlq $32, %rax
-; AVX1-NEXT: orq %rbx, %rax
-; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX1-NEXT: vmovdqa %xmm0, (%r14)
-; AVX1-NEXT: addq $32, %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %rbp
-; AVX1-NEXT: retq
-;
-; AVX2-SLOW-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: pushq %rbp
-; AVX2-SLOW-NEXT: pushq %r14
-; AVX2-SLOW-NEXT: pushq %rbx
-; AVX2-SLOW-NEXT: subq $32, %rsp
-; AVX2-SLOW-NEXT: movq %rdi, %r14
-; AVX2-SLOW-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-SLOW-NEXT: vzeroupper
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movl %eax, %ebp
-; AVX2-SLOW-NEXT: shll $16, %ebp
-; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-SLOW-NEXT: vzeroupper
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movzwl %ax, %ebx
-; AVX2-SLOW-NEXT: orl %ebp, %ebx
-; AVX2-SLOW-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX2-SLOW-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-SLOW-NEXT: vzeroupper
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movl %eax, %ebp
-; AVX2-SLOW-NEXT: shll $16, %ebp
-; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT: callq __truncdfhf2
-; AVX2-SLOW-NEXT: movzwl %ax, %eax
-; AVX2-SLOW-NEXT: orl %ebp, %eax
-; AVX2-SLOW-NEXT: shlq $32, %rax
-; AVX2-SLOW-NEXT: orq %rbx, %rax
-; AVX2-SLOW-NEXT: vmovq %rax, %xmm0
-; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX2-SLOW-NEXT: vmovdqa %xmm0, (%r14)
-; AVX2-SLOW-NEXT: addq $32, %rsp
-; AVX2-SLOW-NEXT: popq %rbx
-; AVX2-SLOW-NEXT: popq %r14
-; AVX2-SLOW-NEXT: popq %rbp
-; AVX2-SLOW-NEXT: retq
-;
-; AVX2-FAST-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: pushq %rbp
-; AVX2-FAST-NEXT: pushq %r14
-; AVX2-FAST-NEXT: pushq %rbx
-; AVX2-FAST-NEXT: subq $32, %rsp
-; AVX2-FAST-NEXT: movq %rdi, %r14
-; AVX2-FAST-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-FAST-NEXT: vzeroupper
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movl %eax, %ebp
-; AVX2-FAST-NEXT: shll $16, %ebp
-; AVX2-FAST-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-FAST-NEXT: vzeroupper
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movzwl %ax, %ebx
-; AVX2-FAST-NEXT: orl %ebp, %ebx
-; AVX2-FAST-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX2-FAST-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-FAST-NEXT: vzeroupper
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movl %eax, %ebp
-; AVX2-FAST-NEXT: shll $16, %ebp
-; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX2-FAST-NEXT: callq __truncdfhf2
-; AVX2-FAST-NEXT: movzwl %ax, %eax
-; AVX2-FAST-NEXT: orl %ebp, %eax
-; AVX2-FAST-NEXT: shlq $32, %rax
-; AVX2-FAST-NEXT: orq %rbx, %rax
-; AVX2-FAST-NEXT: vmovq %rax, %xmm0
-; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-NEXT: vmovdqa %xmm0, (%r14)
-; AVX2-FAST-NEXT: addq $32, %rsp
-; AVX2-FAST-NEXT: popq %rbx
-; AVX2-FAST-NEXT: popq %r14
-; AVX2-FAST-NEXT: popq %rbp
-; AVX2-FAST-NEXT: retq
-;
-; AVX512F-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: pushq %rbp
-; AVX512F-NEXT: pushq %r14
-; AVX512F-NEXT: pushq %rbx
-; AVX512F-NEXT: subq $32, %rsp
-; AVX512F-NEXT: movq %rdi, %r14
-; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movl %eax, %ebp
-; AVX512F-NEXT: shll $16, %ebp
-; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movzwl %ax, %ebx
-; AVX512F-NEXT: orl %ebp, %ebx
-; AVX512F-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movl %eax, %ebp
-; AVX512F-NEXT: shll $16, %ebp
-; AVX512F-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX512F-NEXT: callq __truncdfhf2
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: orl %ebp, %eax
-; AVX512F-NEXT: shlq $32, %rax
-; AVX512F-NEXT: orq %rbx, %rax
-; AVX512F-NEXT: vmovq %rax, %xmm0
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX512F-NEXT: vmovdqa %xmm0, (%r14)
-; AVX512F-NEXT: addq $32, %rsp
-; AVX512F-NEXT: popq %rbx
-; AVX512F-NEXT: popq %r14
-; AVX512F-NEXT: popq %rbp
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: pushq %rbp
-; AVX512VL-NEXT: pushq %r14
-; AVX512VL-NEXT: pushq %rbx
-; AVX512VL-NEXT: subq $32, %rsp
-; AVX512VL-NEXT: movq %rdi, %r14
-; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movl %eax, %ebp
-; AVX512VL-NEXT: shll $16, %ebp
-; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movzwl %ax, %ebx
-; AVX512VL-NEXT: orl %ebp, %ebx
-; AVX512VL-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512VL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movl %eax, %ebp
-; AVX512VL-NEXT: shll $16, %ebp
-; AVX512VL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX512VL-NEXT: callq __truncdfhf2
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: orl %ebp, %eax
-; AVX512VL-NEXT: shlq $32, %rax
-; AVX512VL-NEXT: orq %rbx, %rax
-; AVX512VL-NEXT: vmovq %rax, %xmm0
-; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512VL-NEXT: vmovdqa %xmm0, (%r14)
-; AVX512VL-NEXT: addq $32, %rsp
-; AVX512VL-NEXT: popq %rbx
-; AVX512VL-NEXT: popq %r14
-; AVX512VL-NEXT: popq %rbp
-; AVX512VL-NEXT: retq
+; ALL-LABEL: store_cvt_4f64_to_8i16_zero:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbx
+; ALL-NEXT: subq $80, %rsp
+; ALL-NEXT: movq %rdi, %rbx
+; ALL-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
+; ALL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; ALL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; ALL-NEXT: # xmm0 = mem[1,0]
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; ALL-NEXT: # xmm0 = mem[1,0]
+; ALL-NEXT: callq __truncdfhf2
+; ALL-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovaps %xmm0, (%rbx)
+; ALL-NEXT: addq $80, %rsp
+; ALL-NEXT: popq %rbx
+; ALL-NEXT: retq
 %1 = fptrunc <4 x double> %a0 to <4 x half>
 %2 = bitcast <4 x half> %1 to <4 x i16>
 %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>