Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -42430,6 +42430,41 @@
   return SDValue();
 }
 
+/// If we are converting a value to floating-point, try to replace scalar
+/// truncate of an extracted vector element with a bitcast. This tries to keep
+/// the sequence on XMM registers rather than moving between vector and GPRs.
+static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
+  // TODO: This is currently only used by combineSIntToFP, but it is generalized
+  // to allow being called by any similar cast opcode.
+  // TODO: Consider merging this into lowering: vectorizeExtractedCast().
+  SDValue Trunc = N->getOperand(0);
+  if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
+    return SDValue();
+
+  SDValue ExtElt = Trunc.getOperand(0);
+  if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      !isNullConstant(ExtElt.getOperand(1)))
+    return SDValue();
+
+  EVT TruncVT = Trunc.getValueType();
+  EVT SrcVT = ExtElt.getValueType();
+  unsigned DestWidth = TruncVT.getSizeInBits();
+  unsigned SrcWidth = SrcVT.getSizeInBits();
+  if (SrcWidth % DestWidth != 0)
+    return SDValue();
+
+  // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
+  EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
+  unsigned VecWidth = SrcVecVT.getSizeInBits();
+  unsigned NumElts = VecWidth / DestWidth;
+  EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
+  SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
+  SDLoc DL(N);
+  SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
+                                  BitcastVec, ExtElt.getOperand(1));
+  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
+}
+
 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
   SDValue Op0 = N->getOperand(0);
@@ -42523,6 +42558,10 @@
       return FILDChain;
     }
   }
+
+  if (SDValue V = combineToFPTruncExtElt(N, DAG))
+    return V;
+
   return SDValue();
 }
 
Index: llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
+++ llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
@@ -33,9 +33,8 @@
 ;
 ; X64-LABEL: knownbits_mask_extract_uitofp:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    movzwl %ax, %eax
-; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
+; X64-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <2 x i64> %a0,
   %2 = extractelement <2 x i64> %1, i32 0
Index: llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
+++ llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
@@ -67,9 +67,8 @@
 ;
 ; X64-LABEL: signbits_ashr_extract_sitofp_0:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    shrq $32, %rax
-; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
+; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0,
   %2 = extractelement <2 x i64> %1, i32 0
@@ -90,9 +89,8 @@
 ;
 ; X64-LABEL: signbits_ashr_extract_sitofp_1:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    shrq $32, %rax
-; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
+; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0,
   %2 = extractelement <2 x i64> %1, i32 0
@@ -115,10 +113,10 @@
 ;
 ; X64-LABEL: signbits_ashr_shl_extract_sitofp:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    sarq $61, %rax
-; X64-NEXT:    shll $20, %eax
-; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsrad $29, %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT:    vpsllq $20, %xmm0, %xmm0
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0,
   %2 = shl <2 x i64> %1,
@@ -147,8 +145,9 @@
 ; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
 ; X64:       # %bb.0:
 ; X64-NEXT:    sarq $30, %rdi
-; X64-NEXT:    shrq $3, %rdi
-; X64-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
+; X64-NEXT:    vmovq %rdi, %xmm0
+; X64-NEXT:    vpsrlq $3, %xmm0, %xmm0
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr i64 %a0, 30
   %2 = insertelement <2 x i64> undef, i64 %1, i32 0
@@ -234,8 +233,7 @@
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X64-NEXT:    vmovd %edi, %xmm1
 ; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ss %eax, %xmm2, %xmm0
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0,
   %2 = sext i32 %a2 to i64
@@ -280,8 +278,7 @@
 ; X64-NEXT:    vpand %xmm1, %xmm0, %xmm2
 ; X64-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; X64-NEXT:    vpxor %xmm0, %xmm1, %xmm0
-; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ss %eax, %xmm3, %xmm0
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0,
   %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32>
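
For reference, a minimal IR sketch of the pattern this combine targets, in the
style of the tests above (the function name is illustrative, not taken from the
patch):

; Before: sitofp (trunc (extelt X, 0)) -- the i64 element is moved to a GPR,
; truncated, and converted with cvtsi2ss.
; After:  sitofp (extelt (bitcast X to <4 x i32>), 0) -- on little-endian x86,
; element 0 of the bitcast vector is the low 32 bits of i64 element 0, so the
; conversion can stay on XMM registers.
define float @trunc_extelt_sitofp(<2 x i64> %v) {
  %e = extractelement <2 x i64> %v, i32 0
  %t = trunc i64 %e to i32
  %f = sitofp i32 %t to float
  ret float %f
}

The isNullConstant check restricts the combine to element 0; for a nonzero
index, the extract index would have to be rescaled after the bitcast narrows
the element type.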