Index: llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -4680,6 +4680,24 @@
 SDValue DAGTypeLegalizer::PromoteIntOp_CONCAT_VECTORS(SDNode *N) {
   SDLoc dl(N);
+
+  EVT ResVT = N->getValueType(0);
+  if (ResVT.isScalableVector()) {
+    SDValue Op0 = N->getOperand(0);
+    SDValue Op1 = N->getOperand(1);
+
+    if (N->getNumOperands() > 2)
+      report_fatal_error("Concat of more than two "
+                         "scalable vectors is not supported");
+
+    SDValue Vec0 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
+                               DAG.getUNDEF(ResVT), Op0,
+                               DAG.getIntPtrConstant(0, dl));
+    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec0, Op1,
+                       DAG.getIntPtrConstant(ResVT.getVectorMinNumElements()/2,
+                                             dl));
+  }
+
   unsigned NumElems = N->getNumOperands();
   EVT RetSclrTy = N->getValueType(0).getVectorElementType();
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -952,7 +952,8 @@
         setOperationAction(ISD::TRUNCATE, VT, Custom);
         setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
       }
-    }
+    } else
+      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
   }
 
   for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32})
@@ -8936,15 +8937,48 @@
   assert(Op.getValueType().isScalableVector() &&
          "Only expect to lower inserts into scalable vectors!");
 
-  EVT InVT = Op.getOperand(1).getValueType();
+  EVT Op1VT = Op.getOperand(1).getValueType();
   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
 
-  // We don't have any patterns for scalable vector yet.
-  if (InVT.isScalableVector())
+  if (Op1VT.isScalableVector()) {
+    SDLoc DL(Op);
+    EVT VT = Op.getValueType();
+    EVT Op0VT = Op.getOperand(0).getValueType();
+
+    if (!Op0VT.isInteger() || !Op1VT.isInteger())
+      return SDValue();
+
+    SDValue Vec0 = Op.getOperand(0);
+    SDValue Vec1 = Op.getOperand(1);
+
+    // Ensure the subvector is half the size of the main vector.
+    if (Op0VT.getVectorElementCount() != (Op1VT.getVectorElementCount() * 2))
+      return SDValue();
+
+    // Extend elements of smaller vector...
+    EVT WideVT = Op1VT.widenIntegerVectorElementType(*(DAG.getContext()));
+    SDValue ExtVec = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);
+
+    if (Idx == 0) {
+      SDValue HiVec0;
+      if (Vec0.isUndef())
+        HiVec0 = DAG.getUNDEF(WideVT);
+      else
+        HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
+      return DAG.getNode(AArch64ISD::UZP1, DL, VT, ExtVec, HiVec0);
+    } else if (Idx == Op1VT.getVectorMinNumElements()) {
+      SDValue LoVec0;
+      if (Vec0.getOpcode() == AArch64ISD::UZP1 && Vec0->getOperand(1).isUndef())
+        LoVec0 = Vec0->getOperand(0);
+      else
+        LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
+      return DAG.getNode(AArch64ISD::UZP1, DL, VT, LoVec0, ExtVec);
+    }
     return SDValue();
+  }
 
   // This will be matched by custom code during ISelDAGToDAG.
-  if (Idx == 0 && isPackedVectorType(InVT, DAG) && Op.getOperand(0).isUndef())
+  if (Idx == 0 && isPackedVectorType(Op1VT, DAG) && Op.getOperand(0).isUndef())
     return Op;
 
   return SDValue();
Index: llvm/test/CodeGen/AArch64/sve-split-trunc.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-split-trunc.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 16 x i8> @trunc_i16toi8(<vscale x 16 x i16> %in) {
+; CHECK-LABEL: trunc_i16toi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT:    ret
+  %out = trunc <vscale x 16 x i16> %in to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @trunc_i32toi16(<vscale x 8 x i32> %in) {
+; CHECK-LABEL: trunc_i32toi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %out = trunc <vscale x 8 x i32> %in to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @trunc_i64toi32(<vscale x 4 x i64> %in) {
+; CHECK-LABEL: trunc_i64toi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %out = trunc <vscale x 4 x i64> %in to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 8 x i16> @trunc_i64toi16(<vscale x 8 x i64> %in) {
+; CHECK-LABEL: trunc_i64toi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
+; CHECK-NEXT:    ret
+  %out = trunc <vscale x 8 x i64> %in to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 16 x i8> @trunc_i64toi8(<vscale x 16 x i64> %in) {
+; CHECK-LABEL: trunc_i64toi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 z6.s, z6.s, z7.s
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z5.s
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z4.h, z6.h
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT:    ret
+  %out = trunc <vscale x 16 x i64> %in to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %out
+}