diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -113,8 +113,9 @@
   // XLenVT index (either constant or non-constant).
   VSLIDEUP,
   VSLIDEDOWN,
-  // Matches the semantics of the unmasked vid.v instruction.
-  VID,
+  // Matches the semantics of the vid.v instruction, with a mask and VL
+  // operand.
+  VID_VL,
   // Matches the semantics of the vfcnvt.rod function (Convert double-width
   // float to single-width float, rounding towards odd). Takes a double-width
   // float vector and produces a single-width float vector.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -821,19 +821,35 @@
   MVT VT = Op.getSimpleValueType();
   assert(VT.isFixedLengthVector() && "Unexpected vector!");
 
-  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
-    MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
 
-    SDLoc DL(Op);
-    SDValue VL =
-        DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
+  SDLoc DL(Op);
+  SDValue VL =
+      DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
 
+  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
     unsigned Opc =
         VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
   }
 
+  // Try to match an index sequence, which we can lower directly to the vid
+  // instruction. An all-undef vector is matched by getSplatValue, above.
+  bool IsVID = VT.isInteger();
+  if (IsVID)
+    for (unsigned i = 0, e = Op.getNumOperands(); i < e && IsVID; i++)
+      IsVID &= Op.getOperand(i).isUndef() ||
+               (isa<ConstantSDNode>(Op.getOperand(i)) &&
+                Op.getConstantOperandVal(i) == i);
+
+  if (IsVID) {
+    MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+    SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+    SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
+    return convertFromScalableVector(VT, VID, DAG, Subtarget);
+  }
+
   return SDValue();
 }
 
@@ -1706,12 +1722,15 @@
   SDValue SplattedVal = DAG.getSplatVector(VecVT, DL, Val);
   SDValue SplattedIdx = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Idx);
 
-  SDValue VID = DAG.getNode(RISCVISD::VID, DL, VecVT);
+  SDValue VL = DAG.getRegister(RISCV::X0, Subtarget.getXLenVT());
+  MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
+  SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+  SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VecVT, Mask, VL);
   auto SetCCVT =
       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VecVT);
-  SDValue Mask = DAG.getSetCC(DL, SetCCVT, VID, SplattedIdx, ISD::SETEQ);
+  SDValue SelectCond = DAG.getSetCC(DL, SetCCVT, VID, SplattedIdx, ISD::SETEQ);
 
-  return DAG.getNode(ISD::VSELECT, DL, VecVT, Mask, SplattedVal, Vec);
+  return DAG.getNode(ISD::VSELECT, DL, VecVT, SelectCond, SplattedVal, Vec);
 }
 
 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
@@ -4586,7 +4605,7 @@
   NODE_NAME_CASE(VLEFF_MASK)
   NODE_NAME_CASE(VSLIDEUP)
   NODE_NAME_CASE(VSLIDEDOWN)
-  NODE_NAME_CASE(VID)
+  NODE_NAME_CASE(VID_VL)
   NODE_NAME_CASE(VFNCVT_ROD)
   NODE_NAME_CASE(VECREDUCE_ADD)
   NODE_NAME_CASE(VECREDUCE_UMAX)
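In DAG terms, the @@ -1706 hunk above builds the following sequence (a pseudo-notation sketch using the variable names from the code, not literal output):

    VL         = X0                          ; X0 as the VL operand requests VLMAX
    Mask       = VMSET_VL VL                 ; all-ones mask
    VID        = VID_VL Mask, VL             ; lanes <0, 1, 2, ...>
    SelectCond = SETCC eq, VID, SplattedIdx
    result     = VSELECT SelectCond, SplattedVal, Vec

Renaming the setcc result to SelectCond also avoids shadowing the new Mask value that feeds VID_VL.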
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -819,13 +819,6 @@
                      vti.AVL, vti.SEW)>;
 }
 
-//===----------------------------------------------------------------------===//
-// Miscellaneous RISCVISD SDNodes
-//===----------------------------------------------------------------------===//
-
-def riscv_vid
-    : SDNode<"RISCVISD::VID", SDTypeProfile<1, 0, [SDTCisVec<0>]>, []>;
-
 def SDTRVVSlide : SDTypeProfile<1, 3, [
   SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>
 ]>;
@@ -835,10 +828,6 @@
 
 let Predicates = [HasStdExtV] in {
 
-foreach vti = AllIntegerVectors in
-  def : Pat<(vti.Vector riscv_vid),
-            (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) vti.AVL, vti.SEW)>;
-
 foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
   def : Pat<(vti.Vector (riscv_slideup (vti.Vector vti.RegClass:$rs3),
                                        (vti.Vector vti.RegClass:$rs1),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -210,3 +210,20 @@
 }
 
 } // Predicates = [HasStdExtV, HasStdExtF]
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous RISCVISD SDNodes
+//===----------------------------------------------------------------------===//
+
+def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
+                          [SDTCisVec<0>, SDTCisVec<1>, SDTCVecEltisVT<1, i1>,
+                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;
+
+let Predicates = [HasStdExtV] in {
+
+foreach vti = AllIntegerVectors in
+  def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask),
+                                      (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.SEW)>;
+
+} // Predicates = [HasStdExtV]
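As a concrete instance of the new pattern, taking vti = VI8M1 (nxv8i8; the type is picked here purely for illustration), the foreach above expands to roughly:

    def : Pat<(nxv8i8 (riscv_vid_vl (nxv8i1 true_mask),
                                    (XLenVT (VLOp GPR:$vl)))),
              (PseudoVID_V_M1 GPR:$vl, 8)>;

Only the unmasked (true_mask) form is matched for now; a VID_VL with a non-trivial mask has no selection pattern yet, which is fine since both producers in RISCVISelLowering.cpp construct the mask with VMSET_VL.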
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s
+
+define void @buildvec_vid_v16i8(<16 x i8>* %x) {
+; CHECK-LABEL: buildvec_vid_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 16
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vid.v v25
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>* %x
+  ret void
+}
+
+define void @buildvec_vid_undefelts_v16i8(<16 x i8>* %x) {
+; CHECK-LABEL: buildvec_vid_undefelts_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 16
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vid.v v25
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i8> <i8 0, i8 1, i8 2, i8 undef, i8 4, i8 undef, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>* %x
+  ret void
+}
+
+; TODO: Could do VID then insertelement on missing elements
+define void @buildvec_notquite_vid_v16i8(<16 x i8>* %x) {
+; CHECK-LABEL: buildvec_notquite_vid_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
+; CHECK-NEXT:    addi a1, a1, %lo(.LCPI2_0)
+; CHECK-NEXT:    addi a2, zero, 16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i8> <i8 0, i8 1, i8 3, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>* %x
+  ret void
+}
+
+; TODO: Could do VID then add a constant splat
+define void @buildvec_vid_plus_imm_v16i8(<16 x i8>* %x) {
+; CHECK-LABEL: buildvec_vid_plus_imm_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
+; CHECK-NEXT:    addi a1, a1, %lo(.LCPI3_0)
+; CHECK-NEXT:    addi a2, zero, 16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i8> <i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17>, <16 x i8>* %x
+  ret void
+}
+
+; TODO: Could do VID then multiply by a constant splat
+define void @buildvec_vid_mpy_imm_v16i8(<16 x i8>* %x) {
+; CHECK-LABEL: buildvec_vid_mpy_imm_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
+; CHECK-NEXT:    addi a1, a1, %lo(.LCPI4_0)
+; CHECK-NEXT:    addi a2, zero, 16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>, <16 x i8>* %x
+  ret void
+}
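If the TODOs above are addressed, the plus_imm and mpy_imm cases should be able to drop the constant-pool load in favour of vid.v plus a single arithmetic op. For buildvec_vid_plus_imm_v16i8, output along these lines would be plausible (a hand-written sketch, not update_llc_test_checks.py output):

    addi a1, zero, 16
    vsetvli a1, a1, e8,m1,ta,mu
    vid.v v25
    vadd.vi v25, v25, 2
    vse8.v v25, (a0)
    ret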