diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -35125,123 +35125,6 @@
       Op, DemandedBits, DemandedElts, DAG, Depth);
 }
 
-/// Check if a vector extract from a target-specific shuffle of a load can be
-/// folded into a single element load.
-/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
-/// shuffles have been custom lowered so we need to handle those here.
-static SDValue
-XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
-                                 TargetLowering::DAGCombinerInfo &DCI) {
-  if (DCI.isBeforeLegalizeOps())
-    return SDValue();
-
-  SDValue InVec = N->getOperand(0);
-  SDValue EltNo = N->getOperand(1);
-  EVT EltVT = N->getValueType(0);
-
-  if (!isa<ConstantSDNode>(EltNo))
-    return SDValue();
-
-  EVT OriginalVT = InVec.getValueType();
-  unsigned NumOriginalElts = OriginalVT.getVectorNumElements();
-
-  // Peek through bitcasts, don't duplicate a load with other uses.
-  InVec = peekThroughOneUseBitcasts(InVec);
-
-  EVT CurrentVT = InVec.getValueType();
-  if (!CurrentVT.isVector())
-    return SDValue();
-
-  unsigned NumCurrentElts = CurrentVT.getVectorNumElements();
-  if ((NumOriginalElts % NumCurrentElts) != 0)
-    return SDValue();
-
-  if (!isTargetShuffle(InVec.getOpcode()))
-    return SDValue();
-
-  // Don't duplicate a load with other uses.
-  if (!InVec.hasOneUse())
-    return SDValue();
-
-  SmallVector<int, 16> ShuffleMask;
-  SmallVector<SDValue, 2> ShuffleOps;
-  bool UnaryShuffle;
-  if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
-                            ShuffleOps, ShuffleMask, UnaryShuffle))
-    return SDValue();
-
-  unsigned Scale = NumOriginalElts / NumCurrentElts;
-  if (Scale > 1) {
-    SmallVector<int, 16> ScaledMask;
-    scaleShuffleMask<int>(Scale, ShuffleMask, ScaledMask);
-    ShuffleMask = std::move(ScaledMask);
-  }
-  assert(ShuffleMask.size() == NumOriginalElts && "Shuffle mask size mismatch");
-
-  // Select the input vector, guarding against out of range extract vector.
-  int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
-  int Idx = (Elt > (int)NumOriginalElts) ? SM_SentinelUndef : ShuffleMask[Elt];
-
-  if (Idx == SM_SentinelZero)
-    return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
-                             : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
-  if (Idx == SM_SentinelUndef)
-    return DAG.getUNDEF(EltVT);
-
-  // Bail if any mask element is SM_SentinelZero - getVectorShuffle below
-  // won't handle it.
-  if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; }))
-    return SDValue();
-
-  assert(0 <= Idx && Idx < (int)(2 * NumOriginalElts) &&
-         "Shuffle index out of range");
-  SDValue LdNode = (Idx < (int)NumOriginalElts) ? ShuffleOps[0] : ShuffleOps[1];
-
-  // If inputs to shuffle are the same for both ops, then allow 2 uses
-  unsigned AllowedUses =
-      (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
-
-  if (LdNode.getOpcode() == ISD::BITCAST) {
-    // Don't duplicate a load with other uses.
-    if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
-      return SDValue();
-
-    AllowedUses = 1; // only allow 1 load use if we have a bitcast
-    LdNode = LdNode.getOperand(0);
-  }
-
-  if (!ISD::isNormalLoad(LdNode.getNode()))
-    return SDValue();
-
-  LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
-
-  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || !LN0->isSimple())
-    return SDValue();
-
-  // If there's a bitcast before the shuffle, check if the load type and
-  // alignment is valid.
-  unsigned Align = LN0->getAlignment();
-  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
-      EltVT.getTypeForEVT(*DAG.getContext()));
-
-  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
-    return SDValue();
-
-  // All checks match so transform back to vector_shuffle so that DAG combiner
-  // can finish the job
-  SDLoc dl(N);
-
-  // Create shuffle node taking into account the case that its a unary shuffle
-  SDValue Shuffle = UnaryShuffle ? DAG.getUNDEF(OriginalVT)
-                                 : DAG.getBitcast(OriginalVT, ShuffleOps[1]);
-  Shuffle = DAG.getVectorShuffle(OriginalVT, dl,
-                                 DAG.getBitcast(OriginalVT, ShuffleOps[0]),
-                                 Shuffle, ShuffleMask);
-  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
-                     EltNo);
-}
-
 // Helper to peek through bitops/setcc to determine size of source vector.
 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
@@ -36228,6 +36111,37 @@
     return DAG.getZExtOrTrunc(ExtOp, dl, VT);
   }
 
+  // Count how many times the target shuffle used SrcOp.
+  unsigned RepeatedOps =
+      count_if(Ops, [SrcOp](SDValue V) { return V == SrcOp; });
+
+  // If this is a one-use load, attempt to extract the element directly by
+  // removing the bitcasts between the extract, shuffle and the load.
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  bool SrcOneUse = SrcBC == peekThroughOneUseBitcasts(Src);
+  if (SrcOneUse && VT == SrcVT.getScalarType() &&
+      SrcOp->hasNUsesOfValue(RepeatedOps, 0) &&
+      TLI.isOperationLegalOrCustom(ISD::LOAD, VT) &&
+      TLI.isOperationLegalOrCustom(ISD::LOAD, SrcVT)) {
+    SDValue SrcOpBC = peekThroughOneUseBitcasts(SrcOp);
+    if (ISD::isNormalLoad(SrcOpBC.getNode())) {
+      auto *LD = cast<LoadSDNode>(SrcOpBC);
+      unsigned Align = LD->getAlignment();
+      unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
+          SrcVT.getTypeForEVT(*DAG.getContext()));
+      if (LD->isSimple() && Align >= NewAlign) {
+        if (SrcVT != SrcOpBC.getValueType()) {
+          SrcOpBC = DAG.getLoad(SrcVT, dl, LD->getChain(), LD->getBasePtr(),
+                                LD->getPointerInfo(), Align,
+                                LD->getMemOperand()->getFlags());
+          DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), SrcOpBC.getValue(1));
+        }
+        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, SrcOpBC,
+                           DAG.getConstant(SrcIdx, dl, Idx.getValueType()));
+      }
+    }
+  }
+
   return SDValue();
 }
 
@@ -36495,14 +36409,11 @@
     }
 
     // TODO - Remove this once we can handle the implicit zero-extension of
-    // X86ISD::PEXTRW/X86ISD::PEXTRB in XFormVExtractWithShuffleIntoLoad,
-    // combineHorizontalPredicateResult and combineBasicSADPattern.
+    // X86ISD::PEXTRW/X86ISD::PEXTRB in combineHorizontalPredicateResult and
+    // combineBasicSADPattern.
     return SDValue();
   }
 
-  if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
-    return NewOp;
-
   // Detect mmx extraction of all bits as a i64. It works better as a bitcast.
   if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
       VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -119,3 +119,49 @@
   store volatile double %vecext, double* %a1, align 8
   ret void
 }
+
+define void @PR43971(<8 x float> *%a0, float *%a1) {
+; X32-SSE2-LABEL: PR43971:
+; X32-SSE2:       # %bb.0: # %entry
+; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT:    xorps %xmm1, %xmm1
+; X32-SSE2-NEXT:    cmpltss %xmm0, %xmm1
+; X32-SSE2-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT:    andps %xmm1, %xmm2
+; X32-SSE2-NEXT:    andnps %xmm0, %xmm1
+; X32-SSE2-NEXT:    orps %xmm2, %xmm1
+; X32-SSE2-NEXT:    movss %xmm1, (%eax)
+; X32-SSE2-NEXT:    retl
+;
+; X64-SSSE3-LABEL: PR43971:
+; X64-SSSE3:       # %bb.0: # %entry
+; X64-SSSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSSE3-NEXT:    xorps %xmm1, %xmm1
+; X64-SSSE3-NEXT:    cmpltss %xmm0, %xmm1
+; X64-SSSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-SSSE3-NEXT:    andps %xmm1, %xmm2
+; X64-SSSE3-NEXT:    andnps %xmm0, %xmm1
+; X64-SSSE3-NEXT:    orps %xmm2, %xmm1
+; X64-SSSE3-NEXT:    movss %xmm1, (%rsi)
+; X64-SSSE3-NEXT:    retq
+;
+; X64-AVX-LABEL: PR43971:
+; X64-AVX:       # %bb.0: # %entry
+; X64-AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = mem[1,0]
+; X64-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX-NEXT:    vcmpltss 24(%rdi), %xmm1, %xmm1
+; X64-AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-AVX-NEXT:    vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT:    vmovss %xmm0, (%rsi)
+; X64-AVX-NEXT:    retq
+entry:
+  %0 = load <8 x float>, <8 x float>* %a0, align 32
+  %vecext = extractelement <8 x float> %0, i32 6
+  %cmp = fcmp ogt float %vecext, 0.000000e+00
+  %1 = load float, float* %a1, align 4
+  %cond = select i1 %cmp, float %1, float %vecext
+  store float %cond, float* %a1, align 4
+  ret void
+}