Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -15742,23 +15742,39 @@
   EVT VT = N->getValueType(0);

   // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
-  // with a VECTOR_SHUFFLE.
+  // with a VECTOR_SHUFFLE and possible truncate.
   if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
     SDValue InVec = InVal->getOperand(0);
     SDValue EltNo = InVal->getOperand(1);
-
-    // FIXME: We could support implicit truncation if the shuffle can be
-    // scaled to a smaller vector scalar type.
+    auto InVecT = InVec.getValueType();
     ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo);
-    if (C0 && VT == InVec.getValueType() &&
-        VT.getScalarType() == InVal.getValueType()) {
-      SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1);
+
+    if (C0) {
+      SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
       int Elt = C0->getZExtValue();
       NewMask[0] = Elt;
-
-      if (TLI.isShuffleMaskLegal(NewMask, VT))
-        return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT),
-                                    NewMask);
+      SDValue Val;
+      if (VT.getVectorNumElements() <= InVecT.getVectorNumElements() &&
+          TLI.isShuffleMaskLegal(NewMask, VT)) {
+        Val = DAG.getVectorShuffle(InVecT, SDLoc(N), InVec,
+                                   DAG.getUNDEF(InVecT), NewMask);
+        // If the initial vector is the correct size this shuffle is a
+        // valid result.
+        if (VT == InVecT)
+          return Val;
+        // If not we can truncate the vector.
+        if (VT.getScalarType() == InVecT.getVectorElementType() &&
+            VT.getVectorNumElements() != InVecT.getVectorNumElements()) {
+          MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
+          SDValue ZeroIdx = DAG.getConstant(0, SDLoc(N), IdxTy);
+          EVT SubVT =
+              EVT::getVectorVT(*DAG.getContext(),
+                               InVecT.getVectorElementType(),
+                               VT.getVectorNumElements());
+          Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT, Val,
+                            ZeroIdx);
+          return Val;
+        }
+      }
     }
   }
Index: test/CodeGen/AArch64/arm64-neon-copy.ll
===================================================================
--- test/CodeGen/AArch64/arm64-neon-copy.ll
+++ test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -188,7 +188,7 @@
 define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
 ; CHECK-LABEL: ins2f1:
-; CHECK: mov {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
   %tmp3 = extractelement <2 x double> %tmp1, i32 1
   %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
   ret <1 x double> %tmp4
Index: test/CodeGen/AArch64/neon-scalar-copy.ll
===================================================================
--- test/CodeGen/AArch64/neon-scalar-copy.ll
+++ test/CodeGen/AArch64/neon-scalar-copy.ll
@@ -79,8 +79,7 @@
 define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_bv16B:
-  ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.b[14]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.16b, v0.b[14]
   ; CHECK-NEXT: ret
   %shuffle.i = shufflevector <16 x i8> %v1, <16 x i8> undef, <1 x i32> <i32 14>
   ret <1 x i8> %shuffle.i
@@ -96,8 +95,7 @@
 define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_hv8H:
-  ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.h[7]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.8h, v0.h[7]
   ; CHECK-NEXT: ret
   %shuffle.i = shufflevector <8 x i16> %v1, <8 x i16> undef, <1 x i32> <i32 7>
   ret <1 x i16> %shuffle.i
@@ -113,8 +111,7 @@
 define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_sv4S:
-  ; CHECK-NEXT: mov [[W:w[0-9]+]], v0.s[3]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.4s, v0.s[3]
   ; CHECK-NEXT: ret
   %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <1 x i32> <i32 3>
   ret <1 x i32> %shuffle
@@ -138,7 +135,7 @@
 define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) #0 {
   ; CHECK-LABEL: test_vector_copy_dup_dv2D:
-  ; CHECK-NEXT: {{dup|mov}} {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  ; CHECK-NEXT: dup v0.2d, v1.d[1]
   ; CHECK-NEXT: ret
   %vget_lane = extractelement <2 x i64> %c, i32 1
   %vset_lane = insertelement <1 x i64> undef, i64 %vget_lane, i32 0
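
Note: as a sketch of the node rewrite the DAGCombiner change performs, take
test_vector_dup_sv4S above (the t-numbers and formatting are illustrative,
not copied from an actual SelectionDAG debug dump):

  Before, the pair survives to instruction selection and becomes a lane move
  plus an fmov on AArch64:
    t2: i32 = extract_vector_elt t0, Constant:i64<3>
    t3: v1i32 = scalar_to_vector t2

  After, the combine builds a single-source shuffle at the width of the input
  vector, then truncates it with an EXTRACT_SUBVECTOR at index 0 because the
  v1i32 result type is narrower than the v4i32 shuffle:
    t2: v4i32 = vector_shuffle<3,u,u,u> t0, undef:v4i32
    t3: v1i32 = extract_subvector t2, Constant:i64<0>

The shuffle selects to a single lane dup, which is why the CHECK lines in the
tests above replace the umov/fmov pairs with one dup instruction.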