Index: lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2353,34 +2353,30 @@
     SDValue Result = DAG.getFPExtendOrRound(Sub, dl, DestVT);
     return Result;
   }
-  assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
-  // Code below here assumes !isSigned without checking again.
 
   // Implementation of unsigned i64 to f64 following the algorithm in
   // __floatundidf in compiler_rt. This implementation has the advantage
   // of performing rounding correctly, both in the default rounding mode
   // and in all alternate rounding modes.
-  // TODO: Generalize this for use with other types.
-  if (SrcVT == MVT::i64 && DestVT == MVT::f64) {
+  if (!isSigned && SrcVT.getScalarType() == MVT::i64 &&
+      DestVT.getScalarType() == MVT::f64) {
     LLVM_DEBUG(dbgs() << "Converting unsigned i64 to f64\n");
-    SDValue TwoP52 =
-      DAG.getConstant(UINT64_C(0x4330000000000000), dl, MVT::i64);
-    SDValue TwoP84PlusTwoP52 =
-      DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), dl,
-                        MVT::f64);
-    SDValue TwoP84 =
-      DAG.getConstant(UINT64_C(0x4530000000000000), dl, MVT::i64);
-
-    SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32);
-    SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0,
-                             DAG.getConstant(32, dl, MVT::i64));
-    SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52);
-    SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
-    SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr);
-    SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr);
-    SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
-                                TwoP84PlusTwoP52);
-    return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
+    SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT);
+    SDValue TwoP84PlusTwoP52 = DAG.getConstantFP(
+        BitsToDouble(UINT64_C(0x4530000000100000)), dl, DestVT);
+    SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT);
+    SDValue Mask32 = DAG.getConstant(APInt::getLowBitsSet(64, 32), dl, SrcVT);
+
+    EVT ShiftVT = TLI.getShiftAmountTy(SrcVT, DAG.getDataLayout());
+    SDValue ShiftConst = DAG.getConstant(32, dl, ShiftVT);
+    SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Op0, Mask32);
+    SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Op0, ShiftConst);
+    SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52);
+    SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84);
+    SDValue LoFlt = DAG.getBitcast(DestVT, LoOr);
+    SDValue HiFlt = DAG.getBitcast(DestVT, HiOr);
+    SDValue HiSub = DAG.getNode(ISD::FSUB, dl, DestVT, HiFlt, TwoP84PlusTwoP52);
+    return DAG.getNode(ISD::FADD, dl, DestVT, LoFlt, HiSub);
   }
 
   // TODO: Generalize this for use with other types.
@@ -2443,6 +2439,8 @@
     return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd,
                        DAG.getIntPtrConstant(0, dl));
   }
+  assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
+  // Code below here assumes !isSigned without checking again.
   SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1040,6 +1040,9 @@
     setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
     setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
 
+    if (Subtarget.is64Bit() && !Subtarget.hasAVX512())
+      setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom);
+
     if (!Subtarget.hasAVX512())
       setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
@@ -17057,6 +17060,49 @@
   return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
 }
 
+static SDValue lowerUINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
+                                     const X86Subtarget &Subtarget,
+                                     const SDLoc &dl) {
+  MVT VT = Op.getSimpleValueType();
+  if (VT.getScalarType() != MVT::f32)
+    return SDValue();
+
+  SDValue Src = Op.getOperand(0);
+  MVT SrcVT = Src.getSimpleValueType();
+  int NumElts = SrcVT.getVectorNumElements();
+
+  SDValue Mask = DAG.getConstant(1, dl, SrcVT);
+  SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
+                             DAG.getNode(ISD::SRL, dl, SrcVT, Src, Mask),
+                             DAG.getNode(ISD::AND, dl, SrcVT, Src, Mask));
+  SDValue SignSrc = DAG.getSelect(dl, SrcVT, Src, Sign, Src);
+
+  // Scalarize actual i64 to f32 conversion.
+  SmallVector<SDValue, 4> CvtScalars;
+  for (int i = 0; i != NumElts; ++i) {
+    SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, SignSrc,
+                              DAG.getIntPtrConstant(i, dl));
+    CvtScalars.push_back(DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Src));
+  }
+  SDValue SignCvt = DAG.getBuildVector(VT, dl, CvtScalars);
+
+  // Extract the upper 32-bits of each double - we need the sign for selection.
+  SmallVector<int, 8> PackMask;
+  for (int i = 0; i != NumElts; ++i)
+    PackMask.push_back((i * 2) + 1);
+
+  unsigned SizeInBits = SrcVT.getSizeInBits();
+  MVT SrcVT32 = MVT::getVectorVT(MVT::i32, NumElts);
+  SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
+  SDValue Hi = extractSubVector(Src, NumElts / 2, DAG, dl, SizeInBits / 2);
+  SDValue PackSrc =
+      DAG.getVectorShuffle(SrcVT32, dl, DAG.getBitcast(SrcVT32, Lo),
+                           DAG.getBitcast(SrcVT32, Hi), PackMask);
+  return DAG.getSelect(dl, VT, PackSrc,
+                       DAG.getNode(ISD::FADD, dl, VT, SignCvt, SignCvt),
+                       SignCvt);
+}
+
 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
   SDValue N0 = Op.getOperand(0);
@@ -17072,6 +17118,9 @@
   case MVT::v8i32:
     assert(!Subtarget.hasAVX512());
     return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
+  case MVT::v4i64:
+    assert(Subtarget.hasSSE41() && !Subtarget.hasAVX512());
+    return lowerUINT_TO_FP_vXi64(Op, DAG, Subtarget, dl);
   }
 }
 
@@ -26114,6 +26163,31 @@
   Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
   return;
   }
+  if (SrcVT == MVT::v2i64 && Subtarget.is64Bit() && Subtarget.hasAVX() &&
+      !Subtarget.hasAVX512()) {
+    // TODO Any SSE41+ subtarget should work here but BLENDV codegen ends up
+    // a lot worse than it should be.
+    SDValue Zero = DAG.getConstant(0, dl, SrcVT);
+    SDValue Mask = DAG.getConstant(1, dl, SrcVT);
+    SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
+                               DAG.getNode(ISD::SRL, dl, SrcVT, Src, Mask),
+                               DAG.getNode(ISD::AND, dl, SrcVT, Src, Mask));
+    SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i1, Zero, Src, ISD::SETLT);
+    SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
+    SmallVector<SDValue, 4> SignCvts(4, DAG.getUNDEF(MVT::f32));
+    for (int i = 0; i != 2; ++i) {
+      SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
+                                SignSrc, DAG.getIntPtrConstant(i, dl));
+      SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Src);
+    };
+    SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
+    IsNeg = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, IsNeg,
+                        DAG.getUNDEF(MVT::v2i1));
+    SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
+    SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
+    Results.push_back(Cvt);
+    return;
+  }
   if (SrcVT != MVT::v2i32)
     return;
   SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
@@ -39051,6 +39125,7 @@
   SDValue Op0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
   EVT InVT = Op0.getValueType();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
   // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
@@ -39065,10 +39140,23 @@
     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
   }
 
+  // If upper bits are zero, then use SINT_TO_FP.
+  // UINT_TO_FP(vXi64) -> SINT_TO_FP(TRUNC(vXi64 to vXi32))
+  unsigned LeadingZeros = DAG.computeKnownBits(Op0).countMinLeadingZeros();
+  if (LeadingZeros > 32 && !TLI.isOperationLegal(ISD::UINT_TO_FP, InVT)) {
+    SDLoc dl(N);
+    EVT DstVT = MVT::i32;
+    if (InVT.isVector())
+      DstVT = EVT::getVectorVT(*DAG.getContext(), DstVT,
+                               InVT.getVectorNumElements());
+    SDValue P = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Op0);
+    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
+  }
+
   // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
   // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
   // the optimization here.
- if (DAG.SignBitIsZero(Op0)) + if (LeadingZeros >= 1 && TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, InVT)) return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0); return SDValue(); Index: test/CodeGen/X86/vec_int_to_fp.ll =================================================================== --- test/CodeGen/X86/vec_int_to_fp.ll +++ test/CodeGen/X86/vec_int_to_fp.ll @@ -885,25 +885,33 @@ ; SSE41-NEXT: haddpd %xmm3, %xmm1 ; SSE41-NEXT: retq ; -; VEX-LABEL: uitofp_4i64_to_4f64: -; VEX: # %bb.0: -; VEX-NEXT: vextractf128 $1, %ymm0, %xmm1 -; VEX-NEXT: vmovapd {{.*#+}} xmm2 = [1127219200,1160773632,0,0] -; VEX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; VEX-NEXT: vmovapd {{.*#+}} xmm4 = [4503599627370496,1.9342813113834067E+25] -; VEX-NEXT: vsubpd %xmm4, %xmm3, %xmm3 -; VEX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1] -; VEX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; VEX-NEXT: vsubpd %xmm4, %xmm1, %xmm1 -; VEX-NEXT: vhaddpd %xmm1, %xmm3, %xmm1 -; VEX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; VEX-NEXT: vsubpd %xmm4, %xmm3, %xmm3 -; VEX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1] -; VEX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; VEX-NEXT: vsubpd %xmm4, %xmm0, %xmm0 -; VEX-NEXT: vhaddpd %xmm0, %xmm3, %xmm0 -; VEX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; VEX-NEXT: retq +; AVX1-LABEL: uitofp_4i64_to_4f64: +; AVX1: # %bb.0: +; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] +; AVX1-NEXT: vorps {{.*}}(%rip), %ymm1, %ymm1 +; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vorpd {{.*}}(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vsubpd {{.*}}(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_4i64_to_4f64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4841369599423283200,4841369599423283200,4841369599423283200,4841369599423283200] +; AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4985484787499139072,4985484787499139072,4985484787499139072,4985484787499139072] +; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25] +; AVX2-NEXT: vsubpd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: retq ; ; AVX512F-LABEL: uitofp_4i64_to_4f64: ; AVX512F: # %bb.0: @@ -1925,42 +1933,21 @@ ; ; VEX-LABEL: uitofp_2i64_to_4f32: ; VEX: # %bb.0: +; VEX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; VEX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm1 +; VEX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm2 +; VEX-NEXT: vpsrlq $1, %xmm0, %xmm3 +; VEX-NEXT: vpor %xmm2, %xmm3, %xmm2 +; VEX-NEXT: vblendvpd %xmm1, %xmm2, %xmm0, %xmm0 ; VEX-NEXT: vpextrq $1, %xmm0, %rax -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: js .LBB39_1 -; VEX-NEXT: # %bb.2: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; VEX-NEXT: jmp .LBB39_3 -; VEX-NEXT: .LBB39_1: -; VEX-NEXT: movq %rax, %rcx -; VEX-NEXT: shrq %rcx -; VEX-NEXT: andl $1, %eax -; VEX-NEXT: orq %rcx, %rax -; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; VEX-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; 
VEX-NEXT: .LBB39_3: +; VEX-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm2 ; VEX-NEXT: vmovq %xmm0, %rax -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: js .LBB39_4 -; VEX-NEXT: # %bb.5: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0 -; VEX-NEXT: jmp .LBB39_6 -; VEX-NEXT: .LBB39_4: -; VEX-NEXT: movq %rax, %rcx -; VEX-NEXT: shrq %rcx -; VEX-NEXT: andl $1, %eax -; VEX-NEXT: orq %rcx, %rax -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0 -; VEX-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; VEX-NEXT: .LBB39_6: -; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; VEX-NEXT: js .LBB39_8 -; VEX-NEXT: # %bb.7: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm1 -; VEX-NEXT: .LBB39_8: -; VEX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0] +; VEX-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm0 +; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3] +; VEX-NEXT: vaddps %xmm0, %xmm0, %xmm2 +; VEX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; VEX-NEXT: vpslld $31, %xmm1, %xmm1 +; VEX-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0 ; VEX-NEXT: retq ; ; AVX512F-LABEL: uitofp_2i64_to_4f32: @@ -2082,35 +2069,22 @@ ; ; VEX-LABEL: uitofp_2i64_to_2f32: ; VEX: # %bb.0: +; VEX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; VEX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm1 +; VEX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm2 +; VEX-NEXT: vpsrlq $1, %xmm0, %xmm3 +; VEX-NEXT: vpor %xmm2, %xmm3, %xmm2 +; VEX-NEXT: vblendvpd %xmm1, %xmm2, %xmm0, %xmm0 ; VEX-NEXT: vpextrq $1, %xmm0, %rax -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: js .LBB40_1 -; VEX-NEXT: # %bb.2: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; VEX-NEXT: jmp .LBB40_3 -; VEX-NEXT: .LBB40_1: -; VEX-NEXT: movq %rax, %rcx -; VEX-NEXT: shrq %rcx -; VEX-NEXT: andl $1, %eax -; VEX-NEXT: orq %rcx, %rax -; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; VEX-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; VEX-NEXT: .LBB40_3: +; VEX-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm2 ; VEX-NEXT: vmovq %xmm0, %rax -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: js .LBB40_4 -; VEX-NEXT: # %bb.5: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0 -; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero -; VEX-NEXT: retq -; VEX-NEXT: .LBB40_4: -; VEX-NEXT: movq %rax, %rcx -; VEX-NEXT: shrq %rcx -; VEX-NEXT: andl $1, %eax -; VEX-NEXT: orq %rcx, %rax -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0 -; VEX-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero +; VEX-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm0 +; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3] +; VEX-NEXT: vaddps %xmm0, %xmm0, %xmm2 +; VEX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; VEX-NEXT: vpslld $31, %xmm1, %xmm1 +; VEX-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0 +; VEX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero ; VEX-NEXT: retq ; ; AVX512F-LABEL: uitofp_2i64_to_2f32: @@ -2240,45 +2214,56 @@ ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0] ; SSE41-NEXT: retq ; -; VEX-LABEL: uitofp_4i64_to_4f32_undef: -; VEX: # %bb.0: -; VEX-NEXT: vpextrq $1, %xmm0, %rax -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: js .LBB41_1 -; VEX-NEXT: # %bb.2: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; VEX-NEXT: jmp .LBB41_3 -; VEX-NEXT: .LBB41_1: -; VEX-NEXT: movq %rax, %rcx -; VEX-NEXT: shrq %rcx -; VEX-NEXT: andl $1, %eax -; VEX-NEXT: orq %rcx, %rax -; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; VEX-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; VEX-NEXT: .LBB41_3: -; VEX-NEXT: vmovq %xmm0, %rax -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: js .LBB41_4 -; VEX-NEXT: # %bb.5: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0 -; 
VEX-NEXT: jmp .LBB41_6 -; VEX-NEXT: .LBB41_4: -; VEX-NEXT: movq %rax, %rcx -; VEX-NEXT: shrq %rcx -; VEX-NEXT: andl $1, %eax -; VEX-NEXT: orq %rcx, %rax -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0 -; VEX-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; VEX-NEXT: .LBB41_6: -; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; VEX-NEXT: testq %rax, %rax -; VEX-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; VEX-NEXT: js .LBB41_8 -; VEX-NEXT: # %bb.7: -; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm1 -; VEX-NEXT: .LBB41_8: -; VEX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0] -; VEX-NEXT: retq +; AVX1-LABEL: uitofp_4i64_to_4f32_undef: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm1 +; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm2 +; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1 +; AVX1-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm3 +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm1 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0] +; AVX1-NEXT: vaddps %xmm1, %xmm1, %xmm2 +; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3] +; AVX1-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_4i64_to_4f32_undef: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1] +; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm2 +; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm1 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0] +; AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm2 +; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3] +; AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; AVX512F-LABEL: uitofp_4i64_to_4f32_undef: ; AVX512F: # %bb.0: @@ -2675,133 +2660,54 @@ ; ; AVX1-LABEL: uitofp_4i64_to_4f32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpextrq $1, %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB47_1 -; AVX1-NEXT: # %bb.2: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX1-NEXT: jmp .LBB47_3 -; AVX1-NEXT: .LBB47_1: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: .LBB47_3: -; AVX1-NEXT: vmovq %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB47_4 -; AVX1-NEXT: # %bb.5: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX1-NEXT: jmp .LBB47_6 -; AVX1-NEXT: .LBB47_4: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl 
$1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: .LBB47_6: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vmovq %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB47_7 -; AVX1-NEXT: # %bb.8: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX1-NEXT: jmp .LBB47_9 -; AVX1-NEXT: .LBB47_7: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: .LBB47_9: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] -; AVX1-NEXT: vpextrq $1, %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB47_10 -; AVX1-NEXT: # %bb.11: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; AVX1-NEXT: .LBB47_10: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm3 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 +; AVX1-NEXT: vandpd {{.*}}(%rip), %ymm0, %ymm3 +; AVX1-NEXT: vorpd %ymm3, %ymm1, %ymm1 +; AVX1-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm3 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm4 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3] +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm1 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0] +; AVX1-NEXT: vaddps %xmm1, %xmm1, %xmm3 +; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3] +; AVX1-NEXT: vblendvps %xmm0, %xmm3, %xmm1, %xmm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: uitofp_4i64_to_4f32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpextrq $1, %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB47_1 -; AVX2-NEXT: # %bb.2: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX2-NEXT: jmp .LBB47_3 -; AVX2-NEXT: .LBB47_1: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: .LBB47_3: -; AVX2-NEXT: vmovq %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB47_4 -; AVX2-NEXT: # %bb.5: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX2-NEXT: jmp .LBB47_6 -; AVX2-NEXT: .LBB47_4: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: .LBB47_6: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vmovq %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB47_7 -; AVX2-NEXT: # %bb.8: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX2-NEXT: jmp 
.LBB47_9 -; AVX2-NEXT: .LBB47_7: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1] +; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm2 +; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax ; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: .LBB47_9: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] -; AVX2-NEXT: vpextrq $1, %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB47_10 -; AVX2-NEXT: # %bb.11: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; AVX2-NEXT: .LBB47_10: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm1 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0] +; AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm2 +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm3[1,3] +; AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -3702,26 +3608,35 @@ ; SSE41-NEXT: haddpd %xmm3, %xmm1 ; SSE41-NEXT: retq ; -; VEX-LABEL: uitofp_load_4i64_to_4f64: -; VEX: # %bb.0: -; VEX-NEXT: vmovapd (%rdi), %ymm0 -; VEX-NEXT: vextractf128 $1, %ymm0, %xmm1 -; VEX-NEXT: vmovapd {{.*#+}} xmm2 = [1127219200,1160773632,0,0] -; VEX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; VEX-NEXT: vmovapd {{.*#+}} xmm4 = [4503599627370496,1.9342813113834067E+25] -; VEX-NEXT: vsubpd %xmm4, %xmm3, %xmm3 -; VEX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1] -; VEX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; VEX-NEXT: vsubpd %xmm4, %xmm1, %xmm1 -; VEX-NEXT: vhaddpd %xmm1, %xmm3, %xmm1 -; VEX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; VEX-NEXT: vsubpd %xmm4, %xmm3, %xmm3 -; VEX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1] -; VEX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; VEX-NEXT: vsubpd %xmm4, %xmm0, %xmm0 -; VEX-NEXT: vhaddpd %xmm0, %xmm3, %xmm0 -; VEX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; VEX-NEXT: retq +; AVX1-LABEL: uitofp_load_4i64_to_4f64: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovaps (%rdi), %ymm0 +; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] +; AVX1-NEXT: vorps {{.*}}(%rip), %ymm1, %ymm1 +; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vorpd {{.*}}(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vsubpd {{.*}}(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_4i64_to_4f64: +; 
AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4841369599423283200,4841369599423283200,4841369599423283200,4841369599423283200] +; AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4985484787499139072,4985484787499139072,4985484787499139072,4985484787499139072] +; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25] +; AVX2-NEXT: vsubpd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: retq ; ; AVX512F-LABEL: uitofp_load_4i64_to_4f64: ; AVX512F: # %bb.0: @@ -4607,134 +4522,55 @@ ; AVX1-LABEL: uitofp_load_4i64_to_4f32: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rdi), %ymm0 -; AVX1-NEXT: vpextrq $1, %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB76_1 -; AVX1-NEXT: # %bb.2: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX1-NEXT: jmp .LBB76_3 -; AVX1-NEXT: .LBB76_1: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: .LBB76_3: -; AVX1-NEXT: vmovq %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB76_4 -; AVX1-NEXT: # %bb.5: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX1-NEXT: jmp .LBB76_6 -; AVX1-NEXT: .LBB76_4: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: .LBB76_6: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vmovq %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB76_7 -; AVX1-NEXT: # %bb.8: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX1-NEXT: jmp .LBB76_9 -; AVX1-NEXT: .LBB76_7: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: .LBB76_9: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] -; AVX1-NEXT: vpextrq $1, %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB76_10 -; AVX1-NEXT: # %bb.11: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; AVX1-NEXT: .LBB76_10: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm3 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 +; AVX1-NEXT: vandpd {{.*}}(%rip), %ymm0, %ymm3 +; AVX1-NEXT: vorpd %ymm3, %ymm1, %ymm1 +; AVX1-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm3 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3] +; AVX1-NEXT: vextractf128 
$1, %ymm1, %xmm1 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm4 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3] +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm1 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0] +; AVX1-NEXT: vaddps %xmm1, %xmm1, %xmm3 +; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3] +; AVX1-NEXT: vblendvps %xmm0, %xmm3, %xmm1, %xmm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: uitofp_load_4i64_to_4f32: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 -; AVX2-NEXT: vpextrq $1, %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB76_1 -; AVX2-NEXT: # %bb.2: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX2-NEXT: jmp .LBB76_3 -; AVX2-NEXT: .LBB76_1: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: .LBB76_3: -; AVX2-NEXT: vmovq %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB76_4 -; AVX2-NEXT: # %bb.5: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX2-NEXT: jmp .LBB76_6 -; AVX2-NEXT: .LBB76_4: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2 -; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: .LBB76_6: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vmovq %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB76_7 -; AVX2-NEXT: # %bb.8: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX2-NEXT: jmp .LBB76_9 -; AVX2-NEXT: .LBB76_7: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1] +; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm2 +; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax ; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2 -; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: .LBB76_9: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] -; AVX2-NEXT: vpextrq $1, %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB76_10 -; AVX2-NEXT: # %bb.11: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; AVX2-NEXT: .LBB76_10: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0 -; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm1 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0] +; AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm2 +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm3[1,3] +; AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 ; AVX2-NEXT: vzeroupper ; 
AVX2-NEXT: retq ; @@ -5196,256 +5032,102 @@ ; ; AVX1-LABEL: uitofp_load_8i64_to_8f32: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rdi), %ymm0 -; AVX1-NEXT: vmovdqa 32(%rdi), %ymm2 +; AVX1-NEXT: vmovaps (%rdi), %ymm0 +; AVX1-NEXT: vmovaps 32(%rdi), %ymm1 +; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1] +; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm3 +; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm4 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5 +; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm6 +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 +; AVX1-NEXT: vorps %ymm3, %ymm4, %ymm3 +; AVX1-NEXT: vblendvpd %ymm1, %ymm3, %ymm1, %ymm3 +; AVX1-NEXT: vpextrq $1, %xmm3, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm4 +; AVX1-NEXT: vmovq %xmm3, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm6 +; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 +; AVX1-NEXT: vmovq %xmm3, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm6 +; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3] +; AVX1-NEXT: vpextrq $1, %xmm3, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm3 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[0] +; AVX1-NEXT: vaddps %xmm3, %xmm3, %xmm4 +; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm5[1,3] +; AVX1-NEXT: vblendvps %xmm1, %xmm4, %xmm3, %xmm1 +; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm2 +; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm5 +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3 +; AVX1-NEXT: vorps %ymm2, %ymm3, %ymm2 +; AVX1-NEXT: vblendvpd %ymm0, %ymm2, %ymm0, %ymm2 ; AVX1-NEXT: vpextrq $1, %xmm2, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_1 -; AVX1-NEXT: # %bb.2: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX1-NEXT: jmp .LBB80_3 -; AVX1-NEXT: .LBB80_1: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: .LBB80_3: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm3 ; AVX1-NEXT: vmovq %xmm2, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_4 -; AVX1-NEXT: # %bb.5: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4 -; AVX1-NEXT: jmp .LBB80_6 -; AVX1-NEXT: .LBB80_4: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 -; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm4 -; AVX1-NEXT: .LBB80_6: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm5 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[2,3] ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2 ; AVX1-NEXT: vmovq %xmm2, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_7 -; AVX1-NEXT: # %bb.8: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 -; AVX1-NEXT: jmp .LBB80_9 -; AVX1-NEXT: .LBB80_7: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 -; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: .LBB80_9: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm5 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3] ; AVX1-NEXT: vpextrq $1, %xmm2, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_10 -; AVX1-NEXT: # %bb.11: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm2 -; AVX1-NEXT: jmp .LBB80_12 -; AVX1-NEXT: .LBB80_10: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, 
%xmm5, %xmm2 -; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: .LBB80_12: -; AVX1-NEXT: vpextrq $1, %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_13 -; AVX1-NEXT: # %bb.14: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5 -; AVX1-NEXT: jmp .LBB80_15 -; AVX1-NEXT: .LBB80_13: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5 -; AVX1-NEXT: vaddss %xmm5, %xmm5, %xmm5 -; AVX1-NEXT: .LBB80_15: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3] -; AVX1-NEXT: vmovq %xmm0, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_16 -; AVX1-NEXT: # %bb.17: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 -; AVX1-NEXT: jmp .LBB80_18 -; AVX1-NEXT: .LBB80_16: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 -; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4 -; AVX1-NEXT: .LBB80_18: -; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3] -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vmovq %xmm3, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_19 -; AVX1-NEXT: # %bb.20: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0 -; AVX1-NEXT: jmp .LBB80_21 -; AVX1-NEXT: .LBB80_19: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0 -; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: .LBB80_21: -; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0] -; AVX1-NEXT: vpextrq $1, %xmm3, %rax -; AVX1-NEXT: testq %rax, %rax -; AVX1-NEXT: js .LBB80_22 -; AVX1-NEXT: # %bb.23: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 -; AVX1-NEXT: jmp .LBB80_24 -; AVX1-NEXT: .LBB80_22: -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: shrq %rcx -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 -; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: .LBB80_24: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm7, %xmm2 +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0] +; AVX1-NEXT: vaddps %xmm2, %xmm2, %xmm3 +; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3] +; AVX1-NEXT: vblendvps %xmm0, %xmm3, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: uitofp_load_8i64_to_8f32: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 -; AVX2-NEXT: vmovdqa 32(%rdi), %ymm2 +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1] +; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm4 +; AVX2-NEXT: vpor %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vblendvpd %ymm1, %ymm3, %ymm1, %ymm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm4 +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5 +; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3 +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5 +; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3] +; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = 
xmm4[0,1,2],xmm3[0] +; AVX2-NEXT: vaddps %xmm3, %xmm3, %xmm4 +; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm5 +; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm5[1,3] +; AVX2-NEXT: vblendvps %xmm1, %xmm4, %xmm3, %xmm1 +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm2 +; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm3 +; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vblendvpd %ymm0, %ymm2, %ymm0, %ymm2 ; AVX2-NEXT: vpextrq $1, %xmm2, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_1 -; AVX2-NEXT: # %bb.2: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX2-NEXT: jmp .LBB80_3 -; AVX2-NEXT: .LBB80_1: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1 -; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: .LBB80_3: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3 ; AVX2-NEXT: vmovq %xmm2, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_4 -; AVX2-NEXT: # %bb.5: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4 -; AVX2-NEXT: jmp .LBB80_6 -; AVX2-NEXT: .LBB80_4: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 -; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm4 -; AVX2-NEXT: .LBB80_6: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 +; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3] ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX2-NEXT: vmovq %xmm2, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_7 -; AVX2-NEXT: # %bb.8: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 -; AVX2-NEXT: jmp .LBB80_9 -; AVX2-NEXT: .LBB80_7: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 -; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3 -; AVX2-NEXT: .LBB80_9: -; AVX2-NEXT: vpextrq $1, %xmm2, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_10 -; AVX2-NEXT: # %bb.11: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm2 -; AVX2-NEXT: jmp .LBB80_12 -; AVX2-NEXT: .LBB80_10: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm2 -; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: .LBB80_12: -; AVX2-NEXT: vpextrq $1, %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_13 -; AVX2-NEXT: # %bb.14: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5 -; AVX2-NEXT: jmp .LBB80_15 -; AVX2-NEXT: .LBB80_13: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5 -; AVX2-NEXT: vaddss %xmm5, %xmm5, %xmm5 -; AVX2-NEXT: .LBB80_15: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3] -; AVX2-NEXT: vmovq %xmm0, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_16 -; AVX2-NEXT: # %bb.17: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 -; AVX2-NEXT: jmp .LBB80_18 -; AVX2-NEXT: .LBB80_16: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax ; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 -; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4 -; AVX2-NEXT: .LBB80_18: -; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3] -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2-NEXT: vmovq %xmm3, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_19 -; AVX2-NEXT: # %bb.20: -; AVX2-NEXT: vcvtsi2ssq 
%rax, %xmm6, %xmm0 -; AVX2-NEXT: jmp .LBB80_21 -; AVX2-NEXT: .LBB80_19: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0 -; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: .LBB80_21: -; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0] -; AVX2-NEXT: vpextrq $1, %xmm3, %rax -; AVX2-NEXT: testq %rax, %rax -; AVX2-NEXT: js .LBB80_22 -; AVX2-NEXT: # %bb.23: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 -; AVX2-NEXT: jmp .LBB80_24 -; AVX2-NEXT: .LBB80_22: -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: shrq %rcx -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 -; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: .LBB80_24: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0] -; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3] +; AVX2-NEXT: vpextrq $1, %xmm2, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0] +; AVX2-NEXT: vaddps %xmm2, %xmm2, %xmm3 +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3] +; AVX2-NEXT: vblendvps %xmm0, %xmm3, %xmm2, %xmm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: uitofp_load_8i64_to_8f32:
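
Note (not part of the patch): a minimal scalar C++ sketch of the two conversion tricks the lowering above vectorizes, for reference only. The helper names (BitsToF64, U64ToF64, U64ToF32) are made up for this illustration; the magic constants are the same 2^52 / 2^84 values used in the LegalizeDAG change, and the halve-and-double path mirrors what the existing scalar i64-to-f32 expansion computes.

#include <cstdint>
#include <cstring>

// Reinterpret a 64-bit pattern as a double (what the ISD::BITCAST nodes do).
static double BitsToF64(uint64_t Bits) {
  double D;
  std::memcpy(&D, &Bits, sizeof(D));
  return D;
}

// u64 -> f64, the __floatundidf scheme: plant the low/high 32-bit halves in
// the mantissas of 2^52 and 2^84, then one FSUB/FADD pair reassembles the
// value so that only the final add has to round.
static double U64ToF64(uint64_t X) {
  uint64_t LoBits = (X & 0xFFFFFFFFu) | UINT64_C(0x4330000000000000); // 2^52 + lo
  uint64_t HiBits = (X >> 32) | UINT64_C(0x4530000000000000);         // 2^84 + hi*2^32
  double TwoP84PlusTwoP52 = BitsToF64(UINT64_C(0x4530000000100000));  // 2^84 + 2^52
  return (BitsToF64(HiBits) - TwoP84PlusTwoP52) + BitsToF64(LoBits);
}

// u64 -> f32, the sign-split scheme behind the vXi64 lowering: values with
// the top bit clear convert directly as signed; otherwise halve the value,
// OR the shifted-out bit back in so it still takes part in rounding, convert
// as signed, and double the result.
static float U64ToF32(uint64_t X) {
  if (static_cast<int64_t>(X) >= 0)
    return static_cast<float>(static_cast<int64_t>(X));
  uint64_t Halved = (X >> 1) | (X & 1);
  return 2.0f * static_cast<float>(static_cast<int64_t>(Halved));
}

int main() {
  // Spot-check against the compiler's own u64 conversions.
  uint64_t X = 0xFFFFFFFFFFFFFFFFull;
  return (U64ToF64(X) == static_cast<double>(X) &&
          U64ToF32(X) == static_cast<float>(X)) ? 0 : 1;
}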