Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12780,18 +12780,21 @@
     unsigned LastLegalVectorType = 1;
     bool LastIntegerTrunc = false;
     bool NonZero = false;
+    unsigned FirstZeroAfterNonZero = NumConsecutiveStores;
     for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
       StoreSDNode *ST = cast<StoreSDNode>(StoreNodes[i].MemNode);
       SDValue StoredVal = ST->getValue();
-
-      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) {
-        NonZero |= !C->isNullValue();
-      } else if (ConstantFPSDNode *C =
-                     dyn_cast<ConstantFPSDNode>(StoredVal)) {
-        NonZero |= !C->getConstantFPValue()->isNullValue();
+      bool IsElementZero = false;
+      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal))
+        IsElementZero = C->isNullValue();
+      else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal))
+        IsElementZero = C->getConstantFPValue()->isNullValue();
+      if (IsElementZero) {
+        if (NonZero && FirstZeroAfterNonZero == NumConsecutiveStores)
+          FirstZeroAfterNonZero = i;
       } else {
-        // Non-constant.
-        break;
+        if (NonZero) FirstZeroAfterNonZero = i;
+        NonZero = true;
       }
 
       // Find a legal type for the constant store.
@@ -12844,7 +12847,19 @@
 
     // Check if we found a legal integer type that creates a meaningful merge.
     if (LastLegalType < 2 && LastLegalVectorType < 2) {
-      StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 1);
+      // We have already checked that candidate stores are in order
+      // and of correct shape. While there is no mergeable sequence
+      // from the beginning, one may start later in the sequence. The
+      // only reason a merge of size N could have failed where another
+      // of the same size would not have is if the alignment has
+      // improved or we've dropped a non-zero value. Drop as many
+      // candidates as we can here.
+      unsigned NumSkip = 1;
+      while ((NumSkip < NumConsecutiveStores) && (NumSkip < FirstZeroAfterNonZero) &&
+             (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign)) {
+        NumSkip++;
+      }
+      StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
       continue;
     }
 
@@ -12900,6 +12915,23 @@
       NumStoresToMerge = i + 1;
     }
 
+    // Check if we found a legal integer type that creates a meaningful merge.
+    if (NumStoresToMerge < 2) {
+      // We have already checked that candidate stores are in order
+      // and of correct shape. While there is no mergeable sequence
+      // from the beginning, one may start later in the sequence. The
+      // only reason a merge of size N could have failed where another
+      // of the same size would not have is if the alignment has
+      // improved. Drop as many candidates as we can here.
+      unsigned NumSkip = 1;
+      while ((NumSkip < NumConsecutiveStores) &&
+             (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
+        NumSkip++;
+
+      StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
+      continue;
+    }
+
     bool Merged = MergeStoresOfConstantsOrVecElts(
         StoreNodes, MemVT, NumStoresToMerge, false, true, false);
     if (!Merged) {
@@ -15710,23 +15742,38 @@
   EVT VT = N->getValueType(0);
 
   // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
-  // with a VECTOR_SHUFFLE.
+  // with a VECTOR_SHUFFLE and possible truncate.
   if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
     SDValue InVec = InVal->getOperand(0);
     SDValue EltNo = InVal->getOperand(1);
-
-    // FIXME: We could support implicit truncation if the shuffle can be
-    // scaled to a smaller vector scalar type.
+    auto InVecT = InVec.getValueType();
     ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo);
-    if (C0 && VT == InVec.getValueType() &&
-        VT.getScalarType() == InVal.getValueType()) {
-      SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1);
+
+    if (C0) {
+      SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
       int Elt = C0->getZExtValue();
       NewMask[0] = Elt;
-
-      if (TLI.isShuffleMaskLegal(NewMask, VT))
-        return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT),
-                                    NewMask);
+      SDValue Val;
+      if (VT.getVectorNumElements() <= InVecT.getVectorNumElements() &&
+          TLI.isShuffleMaskLegal(NewMask, VT)) {
+        Val = DAG.getVectorShuffle(InVecT, SDLoc(N), InVec,
+                                   DAG.getUNDEF(InVecT), NewMask);
+        // If the initial vector is the correct size, this shuffle is a
+        // valid result.
+        if (VT == InVecT)
+          return Val;
+        // If not, we can truncate the vector.
+        if (VT.getScalarType() == InVecT.getVectorElementType()) {
+          MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
+          SDValue ZeroIdx = DAG.getConstant(0, SDLoc(N), IdxTy);
+          EVT SubVT =
+              EVT::getVectorVT(*DAG.getContext(), InVecT.getVectorElementType(),
+                               VT.getVectorNumElements());
+          Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT, Val,
+                            ZeroIdx);
+          return Val;
+        }
+      }
     }
   }
Index: test/CodeGen/AArch64/arm64-neon-copy.ll
===================================================================
--- test/CodeGen/AArch64/arm64-neon-copy.ll
+++ test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -188,7 +188,7 @@
 
 define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
 ; CHECK-LABEL: ins2f1:
-; CHECK: mov {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
   %tmp3 = extractelement <2 x double> %tmp1, i32 1
   %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
   ret <1 x double> %tmp4
Index: test/CodeGen/AArch64/neon-scalar-copy.ll
===================================================================
--- test/CodeGen/AArch64/neon-scalar-copy.ll
+++ test/CodeGen/AArch64/neon-scalar-copy.ll
@@ -79,8 +79,7 @@
 
 define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_bv16B:
-  ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.b[14]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.16b, v0.b[14]
   ; CHECK-NEXT: ret
   %shuffle.i = shufflevector <16 x i8> %v1, <16 x i8> undef, <1 x i32> <i32 14>
   ret <1 x i8> %shuffle.i
@@ -96,8 +95,7 @@
 
 define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_hv8H:
-  ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.h[7]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.8h, v0.h[7]
   ; CHECK-NEXT: ret
   %shuffle.i = shufflevector <8 x i16> %v1, <8 x i16> undef, <1 x i32> <i32 7>
   ret <1 x i16> %shuffle.i
@@ -113,8 +111,7 @@
 
 define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_sv4S:
-  ; CHECK-NEXT: mov [[W:w[0-9]+]], v0.s[3]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.4s, v0.s[3]
   ; CHECK-NEXT: ret
   %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <1 x i32> <i32 3>
   ret <1 x i32> %shuffle
@@ -138,7 +135,7 @@
 
 define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) #0 {
   ; CHECK-LABEL: test_vector_copy_dup_dv2D:
-  ; CHECK-NEXT: {{dup|mov}} {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  ; CHECK-NEXT: dup v0.2d, v1.d[1]
   ; CHECK-NEXT: ret
   %vget_lane = extractelement <2 x i64> %c, i32 1
   %vset_lane = insertelement <1 x i64> undef, i64 %vget_lane, i32 0
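
For context on the store-merge hunks: when the first candidate store cannot start a
profitable merge, the new NumSkip loop drops every leading candidate whose alignment
does not improve on FirstStoreAlign (stopping no later than the first zero that
follows a non-zero value), so a merge may still be attempted further into the
sequence. The IR below is a hypothetical illustration only, not taken from the
patch's tests; the function name, pointer, offsets, and constants are made up, and
whether a merge actually happens depends on the target.

; Four consecutive constant i16 stores; the first candidate is less aligned
; than a later one, so a viable merge may only start further into the sequence.
define void @consecutive_stores(i16* align 8 %p) {
  %a1 = getelementptr i16, i16* %p, i64 1   ; byte offset 2, align 2
  %a2 = getelementptr i16, i16* %p, i64 2   ; byte offset 4, align 4
  %a3 = getelementptr i16, i16* %p, i64 3   ; byte offset 6, align 2
  %a4 = getelementptr i16, i16* %p, i64 4   ; byte offset 8, align 8
  store i16 1, i16* %a1, align 2
  store i16 0, i16* %a2, align 4
  store i16 0, i16* %a3, align 2
  store i16 0, i16* %a4, align 8
  ret void
}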