diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -75,6 +75,12 @@
     return selectRVVUimm5(N, Width, Imm);
   }
 
+  void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm,
+                                  const SDLoc &DL, unsigned CurOp,
+                                  bool IsMasked, bool IsStridedOrIndexed,
+                                  SmallVectorImpl<SDValue> &Operands,
+                                  MVT *IndexVT = nullptr);
+
   void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided);
   void selectVLSEGFF(SDNode *Node, bool IsMasked);
   void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -124,33 +124,23 @@
   }
 }
 
-void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
-                                    bool IsStrided) {
-  SDLoc DL(Node);
-  unsigned NF = Node->getNumValues() - 1;
-  MVT VT = Node->getSimpleValueType(0);
-  unsigned ScalarSize = VT.getScalarSizeInBits();
-  MVT XLenVT = Subtarget->getXLenVT();
-  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-
+void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
+    SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp,
+    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
+    MVT *IndexVT) {
   SDValue Chain = Node->getOperand(0);
   SDValue Glue;
 
-  unsigned CurOp = 2;
-  SmallVector<SDValue, 8> Operands;
-  if (IsMasked) {
-    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
-                                 Node->op_begin() + CurOp + NF);
-    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
-    Operands.push_back(MaskedOff);
-    CurOp += NF;
-  }
   SDValue Base;
   SelectBaseAddr(Node->getOperand(CurOp++), Base);
   Operands.push_back(Base); // Base pointer.
-  if (IsStrided)
-    Operands.push_back(Node->getOperand(CurOp++)); // Stride.
+
+  if (IsStridedOrIndexed) {
+    Operands.push_back(Node->getOperand(CurOp++)); // Index.
+    if (IndexVT)
+      *IndexVT = Operands.back()->getSimpleValueType(0);
+  }
+
   if (IsMasked) {
     // Mask needs to be copied to V0.
     SDValue Mask = Node->getOperand(CurOp++);
@@ -161,10 +151,37 @@
   SDValue VL;
   selectVLOp(Node->getOperand(CurOp++), VL);
   Operands.push_back(VL);
+
+  MVT XLenVT = Subtarget->getXLenVT();
+  SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
   Operands.push_back(SEW);
+
   Operands.push_back(Chain); // Chain.
   if (Glue)
     Operands.push_back(Glue);
+}
+
+void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
+                                    bool IsStrided) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumValues() - 1;
+  MVT VT = Node->getSimpleValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+
+  unsigned CurOp = 2;
+  SmallVector<SDValue, 8> Operands;
+  if (IsMasked) {
+    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
+                                 Node->op_begin() + CurOp + NF);
+    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
+    Operands.push_back(MaskedOff);
+    CurOp += NF;
+  }
+
+  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
+                             Operands);
+
   const RISCV::VLSEGPseudo *P =
       RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
                             static_cast<unsigned>(LMUL));
@@ -192,10 +209,6 @@
   MVT XLenVT = Subtarget->getXLenVT();
   unsigned ScalarSize = VT.getScalarSizeInBits();
   RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-
-  SDValue Chain = Node->getOperand(0);
-  SDValue Glue;
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -206,23 +219,10 @@
     Operands.push_back(MaskedOff);
     CurOp += NF;
   }
-  SDValue Base;
-  SelectBaseAddr(Node->getOperand(CurOp++), Base);
-  Operands.push_back(Base); // Base pointer.
-  if (IsMasked) {
-    // Mask needs to be copied to V0.
-    SDValue Mask = Node->getOperand(CurOp++);
-    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-    Glue = Chain.getValue(1);
-    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-  }
-  SDValue VL;
-  selectVLOp(Node->getOperand(CurOp++), VL);
-  Operands.push_back(VL);
-  Operands.push_back(SEW);
-  Operands.push_back(Chain); // Chain.
-  if (Glue)
-    Operands.push_back(Glue);
+
+  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+                             /*IsStridedOrIndexed*/ false, Operands);
+
   const RISCV::VLSEGPseudo *P =
       RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                             ScalarSize, static_cast<unsigned>(LMUL));
@@ -252,12 +252,7 @@
   unsigned NF = Node->getNumValues() - 1;
   MVT VT = Node->getSimpleValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  MVT XLenVT = Subtarget->getXLenVT();
   RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-
-  SDValue Chain = Node->getOperand(0);
-  SDValue Glue;
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -268,25 +263,10 @@
     Operands.push_back(MaskedOff);
     CurOp += NF;
   }
-  SDValue Base;
-  SelectBaseAddr(Node->getOperand(CurOp++), Base);
-  Operands.push_back(Base); // Base pointer.
-  Operands.push_back(Node->getOperand(CurOp++)); // Index.
-  MVT IndexVT = Operands.back()->getSimpleValueType(0);
-  if (IsMasked) {
-    // Mask needs to be copied to V0.
-    SDValue Mask = Node->getOperand(CurOp++);
-    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-    Glue = Chain.getValue(1);
-    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-  }
-  SDValue VL;
-  selectVLOp(Node->getOperand(CurOp++), VL);
-  Operands.push_back(VL);
-  Operands.push_back(SEW);
-  Operands.push_back(Chain); // Chain.
-  if (Glue)
-    Operands.push_back(Glue);
+
+  MVT IndexVT;
+  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
 
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Element count mismatch");
@@ -323,37 +303,17 @@
   NF--;
   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  MVT XLenVT = Subtarget->getXLenVT();
   RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
 
-  SDValue Chain = Node->getOperand(0);
-  SDValue Glue;
-
   SmallVector<SDValue, 8> Operands;
   Operands.push_back(StoreVal);
   unsigned CurOp = 2 + NF;
-  SDValue Base;
-  SelectBaseAddr(Node->getOperand(CurOp++), Base);
-  Operands.push_back(Base); // Base pointer.
-  if (IsStrided)
-    Operands.push_back(Node->getOperand(CurOp++)); // Stride.
-  if (IsMasked) {
-    // Mask needs to be copied to V0.
-    SDValue Mask = Node->getOperand(CurOp++);
-    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-    Glue = Chain.getValue(1);
-    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-  }
-  SDValue VL;
-  selectVLOp(Node->getOperand(CurOp++), VL);
-  Operands.push_back(VL);
-  Operands.push_back(SEW);
-  Operands.push_back(Chain); // Chain.
-  if (Glue)
-    Operands.push_back(Glue);
+
+  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
+                             Operands);
+
   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
       NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
   MachineSDNode *Store =
@@ -373,37 +333,17 @@
   --NF;
   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  MVT XLenVT = Subtarget->getXLenVT();
   RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
 
-  SDValue Chain = Node->getOperand(0);
-  SDValue Glue;
-
   SmallVector<SDValue, 8> Operands;
   Operands.push_back(StoreVal);
   unsigned CurOp = 2 + NF;
-  SDValue Base;
-  SelectBaseAddr(Node->getOperand(CurOp++), Base);
-  Operands.push_back(Base); // Base pointer.
-  Operands.push_back(Node->getOperand(CurOp++)); // Index.
-  MVT IndexVT = Operands.back()->getSimpleValueType(0);
-  if (IsMasked) {
-    // Mask needs to be copied to V0.
-    SDValue Mask = Node->getOperand(CurOp++);
-    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-    Glue = Chain.getValue(1);
-    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-  }
-  SDValue VL;
-  selectVLOp(Node->getOperand(CurOp++), VL);
-  Operands.push_back(VL);
-  Operands.push_back(SEW);
-  Operands.push_back(Chain); // Chain.
-  if (Glue)
-    Operands.push_back(Glue);
+
+  MVT IndexVT;
+  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
 
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Element count mismatch");
@@ -670,35 +610,16 @@
 
     MVT VT = Node->getSimpleValueType(0);
     unsigned ScalarSize = VT.getScalarSizeInBits();
-    MVT XLenVT = Subtarget->getXLenVT();
-    SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-
-    SDValue Chain = Node->getOperand(0);
-    SDValue Glue;
 
     unsigned CurOp = 2;
    SmallVector<SDValue, 8> Operands;
     if (IsMasked)
       Operands.push_back(Node->getOperand(CurOp++));
-    SDValue Base;
-    SelectBaseAddr(Node->getOperand(CurOp++), Base);
-    Operands.push_back(Base); // Base pointer.
-    Operands.push_back(Node->getOperand(CurOp++)); // Index.
-    MVT IndexVT = Operands.back()->getSimpleValueType(0);
-    if (IsMasked) {
-      // Mask needs to be copied to V0.
-      SDValue Mask = Node->getOperand(CurOp++);
-      Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-      Glue = Chain.getValue(1);
-      Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-    }
-    SDValue VL;
-    selectVLOp(Node->getOperand(CurOp++), VL);
-    Operands.push_back(VL);
-    Operands.push_back(SEW);
-    Operands.push_back(Chain); // Chain.
-    if (Glue)
-      Operands.push_back(Glue);
+
+    MVT IndexVT;
+    addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+                               /*IsStridedOrIndexed*/ true, Operands,
+                               &IndexVT);
 
     assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
            "Element count mismatch");
@@ -730,37 +651,16 @@
 
     MVT VT = Node->getSimpleValueType(0);
     unsigned ScalarSize = VT.getScalarSizeInBits();
-    MVT XLenVT = Subtarget->getXLenVT();
     // VLE1 uses an SEW of 8.
     unsigned SEWImm = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
-    SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
-
-    SDValue Chain = Node->getOperand(0);
-    SDValue Glue;
 
     unsigned CurOp = 2;
     SmallVector<SDValue, 8> Operands;
     if (IsMasked)
       Operands.push_back(Node->getOperand(CurOp++));
-    SDValue Base;
-    SelectBaseAddr(Node->getOperand(CurOp++), Base);
-    Operands.push_back(Base); // Base pointer.
-    if (IsStrided)
-      Operands.push_back(Node->getOperand(CurOp++)); // Stride.
-    if (IsMasked) {
-      // Mask needs to be copied to V0.
-      SDValue Mask = Node->getOperand(CurOp++);
-      Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-      Glue = Chain.getValue(1);
-      Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-    }
-    SDValue VL;
-    selectVLOp(Node->getOperand(CurOp++), VL);
-    Operands.push_back(VL);
-    Operands.push_back(SEW);
-    Operands.push_back(Chain); // Chain.
-    if (Glue)
-      Operands.push_back(Glue);
+
+    addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
+                               Operands);
 
     RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VLEPseudo *P =
@@ -781,33 +681,14 @@
 
     MVT VT = Node->getSimpleValueType(0);
     unsigned ScalarSize = VT.getScalarSizeInBits();
-    MVT XLenVT = Subtarget->getXLenVT();
-    SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-
-    SDValue Chain = Node->getOperand(0);
-    SDValue Glue;
 
     unsigned CurOp = 2;
     SmallVector<SDValue, 8> Operands;
    if (IsMasked)
       Operands.push_back(Node->getOperand(CurOp++));
-    SDValue Base;
-    SelectBaseAddr(Node->getOperand(CurOp++), Base);
-    Operands.push_back(Base); // Base pointer.
-    if (IsMasked) {
-      // Mask needs to be copied to V0.
-      SDValue Mask = Node->getOperand(CurOp++);
-      Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-      Glue = Chain.getValue(1);
-      Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-    }
-    SDValue VL;
-    selectVLOp(Node->getOperand(CurOp++), VL);
-    Operands.push_back(VL);
-    Operands.push_back(SEW);
-    Operands.push_back(Chain); // Chain.
-    if (Glue)
-      Operands.push_back(Glue);
+
+    addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+                               /*IsStridedOrIndexed*/ false, Operands);
 
     RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VLEPseudo *P =
@@ -921,34 +802,15 @@
 
     MVT VT = Node->getOperand(2)->getSimpleValueType(0);
     unsigned ScalarSize = VT.getScalarSizeInBits();
-    MVT XLenVT = Subtarget->getXLenVT();
-    SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-
-    SDValue Chain = Node->getOperand(0);
-    SDValue Glue;
 
     unsigned CurOp = 2;
     SmallVector<SDValue, 8> Operands;
     Operands.push_back(Node->getOperand(CurOp++)); // Store value.
-    SDValue Base;
-    SelectBaseAddr(Node->getOperand(CurOp++), Base);
-    Operands.push_back(Base); // Base pointer.
-    Operands.push_back(Node->getOperand(CurOp++)); // Index.
-    MVT IndexVT = Operands.back()->getSimpleValueType(0);
-    if (IsMasked) {
-      // Mask needs to be copied to V0.
-      SDValue Mask = Node->getOperand(CurOp++);
-      Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-      Glue = Chain.getValue(1);
-      Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-    }
-    SDValue VL;
-    selectVLOp(Node->getOperand(CurOp++), VL);
-    Operands.push_back(VL);
-    Operands.push_back(SEW);
-    Operands.push_back(Node->getOperand(0)); // Chain.
-    if (Glue)
-      Operands.push_back(Glue);
+
+    MVT IndexVT;
+    addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+                               /*IsStridedOrIndexed*/ true, Operands,
+                               &IndexVT);
 
     assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
            "Element count mismatch");
@@ -980,43 +842,21 @@
 
     MVT VT = Node->getOperand(2)->getSimpleValueType(0);
     unsigned ScalarSize = VT.getScalarSizeInBits();
-    MVT XLenVT = Subtarget->getXLenVT();
     // VSE1 uses an SEW of 8.
     unsigned SEWImm = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
-    SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
-
-    SDValue Chain = Node->getOperand(0);
-    SDValue Glue;
 
     unsigned CurOp = 2;
     SmallVector<SDValue, 8> Operands;
     Operands.push_back(Node->getOperand(CurOp++)); // Store value.
-    SDValue Base;
-    SelectBaseAddr(Node->getOperand(CurOp++), Base);
-    Operands.push_back(Base); // Base pointer.
-    if (IsStrided)
-      Operands.push_back(Node->getOperand(CurOp++)); // Stride.
-    if (IsMasked) {
-      // Mask needs to be copied to V0.
-      SDValue Mask = Node->getOperand(CurOp++);
-      Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-      Glue = Chain.getValue(1);
-      Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-    }
-    SDValue VL;
-    selectVLOp(Node->getOperand(CurOp++), VL);
-    Operands.push_back(VL);
-    Operands.push_back(SEW);
-    Operands.push_back(Node->getOperand(0)); // Chain.
-    if (Glue)
-      Operands.push_back(Glue);
+
+    addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
+                               Operands);
 
     RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
     MachineSDNode *Store =
         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
-
     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
       CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});