diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1749,7 +1749,8 @@
 // Note that this method will also match potentially unappealing index
 // sequences, like , however it is left to the caller to
 // determine whether this is worth generating code for.
-static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
+static Optional<VIDSequence>
+isSimpleVIDSequence(SDValue Op, bool IgnoreFirstElement = false) {
   unsigned NumElts = Op.getNumOperands();
   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
   if (!Op.getValueType().isInteger())
@@ -1759,7 +1760,7 @@
   Optional<int64_t> SeqStepNum, SeqAddend;
   Optional<std::pair<uint64_t, unsigned>> PrevElt;
   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
-  for (unsigned Idx = 0; Idx < NumElts; Idx++) {
+  for (unsigned Idx = IgnoreFirstElement ? 1 : 0; Idx < NumElts; Idx++) {
     // Assume undef elements match the sequence; we just have to be careful
     // when interpolating across them.
     if (Op.getOperand(Idx).isUndef())
@@ -1956,7 +1957,15 @@
   // Try and match index sequences, which we can lower to the vid instruction
   // with optional modifications. An all-undef vector is matched by
   // getSplatValue, above.
-  if (auto SimpleVID = isSimpleVIDSequence(Op)) {
+  auto SimpleVID = isSimpleVIDSequence(Op);
+  // If no sequence matched, the first element may be the lone mismatch; it can
+  // be set separately with vmv.s.x, so retry ignoring the first element.
+  bool InconsistentFirst = false;
+  if (!SimpleVID) {
+    SimpleVID = isSimpleVIDSequence(Op, /*IgnoreFirstElement=*/true);
+    InconsistentFirst = true;
+  }
+  if (SimpleVID) {
     int64_t StepNumerator = SimpleVID->StepNumerator;
     unsigned StepDenominator = SimpleVID->StepDenominator;
     int64_t Addend = SimpleVID->Addend;
@@ -2002,6 +2011,13 @@
             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
       }
+      if (InconsistentFirst) {
+        auto FirstElement = Op.getOperand(0);
+        VID = convertToScalableVector(ContainerVT, VID, DAG, Subtarget);
+        VID = DAG.getNode(RISCVISD::VMV_S_X_VL, DL, ContainerVT, VID,
+                          FirstElement, VL);
+        VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
+      }
       return VID;
     }
   }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -663,3 +663,49 @@
   store <8 x i16> , <8 x i16>* %x
   ret void
 }
+
+define void @buildvec_vid_vmv_v8i16(<8 x i16>* %x) {
+; CHECK-LABEL: buildvec_vid_vmv_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    li a1, 77
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vmv.s.x v8, a1
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <8 x i16> <i16 77, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, <8 x i16>* %x
+  ret void
+}
+
+define void @buildvec_vid_mpy_vmv_imm_v8i16(<8 x i16>* %x) {
+; CHECK-LABEL: buildvec_vid_mpy_vmv_imm_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    li a1, 17
+; CHECK-NEXT:    vmul.vx v8, v8, a1
+; CHECK-NEXT:    li a1, 77
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vmv.s.x v8, a1
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <8 x i16> <i16 77, i16 17, i16 34, i16 51, i16 68, i16 85, i16 102, i16 119>, <8 x i16>* %x
+  ret void
+}
+
+define void @buildvec_vid_shl_vmv_imm_v8i16(<8 x i16>* %x) {
+; CHECK-LABEL: buildvec_vid_shl_vmv_imm_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vsll.vi v8, v8, 9
+; CHECK-NEXT:    li a1, 77
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vmv.s.x v8, a1
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <8 x i16> <i16 77, i16 512, i16 1024, i16 1536, i16 2048, i16 2560, i16 3072, i16 3584>, <8 x i16>* %x
+  ret void
+}
+
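
Note (illustration only, not part of the patch): the new lowering depends on
vmv.s.x writing only element 0 of the destination; the vsetvli switch to a
tail-undisturbed (tu) policy before the vmv.s.x is what keeps elements 1..7 of
the vid result intact, whereas under tail-agnostic (ta) they could be
clobbered. Annotated output of the first new test above:

  vsetivli zero, 8, e16, m1, ta, mu     # VL=8, SEW=16, LMUL=1
  vid.v    v8                           # v8 = {0,1,2,3,4,5,6,7}
  li       a1, 77
  vsetvli  zero, zero, e16, m1, tu, mu  # same VL, now tail-undisturbed
  vmv.s.x  v8, a1                       # writes v8[0] only: {77,1,2,3,4,5,6,7}
  vse16.v  v8, (a0)                     # store the result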