diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -73,11 +73,11 @@
     return selectRVVUimm5(N, Width, Imm);
   }
 
-  void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsMasked, bool IsStrided);
+  void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided);
   void selectVLSEGFF(SDNode *Node, bool IsMasked);
-  void selectVLXSEG(SDNode *Node, unsigned IntNo, bool IsMasked);
-  void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsMasked, bool IsStrided);
-  void selectVSXSEG(SDNode *Node, unsigned IntNo, bool IsMasked);
+  void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
+  void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided);
+  void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
 
   // Include the pieces autogenerated from the target description.
 #include "RISCVGenDAGISel.inc"
@@ -85,6 +85,54 @@
 private:
   void doPeepholeLoadStoreADDI();
 };
-}
+
+namespace RISCV {
+struct VLSEGPseudo {
+  uint8_t NF;
+  uint8_t Masked;
+  uint8_t Strided;
+  uint8_t FF;
+  uint8_t SEW;
+  uint8_t LMUL;
+  uint16_t Pseudo;
+};
+
+struct VLXSEGPseudo {
+  uint8_t NF;
+  uint8_t Masked;
+  uint8_t Ordered;
+  uint8_t SEW;
+  uint8_t LMUL;
+  uint8_t IndexLMUL;
+  uint16_t Pseudo;
+};
+
+struct VSSEGPseudo {
+  uint8_t NF;
+  uint8_t Masked;
+  uint8_t Strided;
+  uint8_t SEW;
+  uint8_t LMUL;
+  uint16_t Pseudo;
+};
+
+struct VSXSEGPseudo {
+  uint8_t NF;
+  uint8_t Masked;
+  uint8_t Ordered;
+  uint8_t SEW;
+  uint8_t LMUL;
+  uint8_t IndexLMUL;
+  uint16_t Pseudo;
+};
+
+#define GET_RISCVVSSEGTable_DECL
+#define GET_RISCVVLSEGTable_DECL
+#define GET_RISCVVLXSEGTable_DECL
+#define GET_RISCVVSXSEGTable_DECL
+#include "RISCVGenSearchableTables.inc"
+} // namespace RISCV
+
+} // namespace llvm
 
 #endif
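The four `GET_*_DECL` guards above pull keyed lookup declarations out of RISCVGenSearchableTables.inc, one per GenericTable defined in RISCVInstrInfoVPseudos.td further down. For readers who have not used the SearchableTables backend: for each table it emits a static array sorted by the primary key plus a lookup function that binary-searches it. Below is a minimal standalone sketch of that pattern for getVLSEGPseudo; the rows, opcode values, and the exact shape of the generated code are assumptions for illustration, since the real contents come out of the TableGen records.

```cpp
// Minimal sketch of the keyed-lookup pattern the SearchableTables backend
// generates for RISCVVLSEGTable. Rows and opcode values are hypothetical.
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <tuple>

struct VLSEGPseudo {
  uint8_t NF, Masked, Strided, FF, SEW, LMUL;
  uint16_t Pseudo; // opcode of the selected pseudo instruction
};

// Rows sorted by the primary key (NF, Masked, Strided, FF, SEW, LMUL).
static const VLSEGPseudo VLSEGPseudos[] = {
    {2, 0, 0, 0, 32, 0, 1000}, // stand-in for PseudoVLSEG2E32_V_M1
    {2, 1, 0, 0, 32, 0, 1001}, // stand-in for PseudoVLSEG2E32_V_M1_MASK
};

const VLSEGPseudo *getVLSEGPseudo(uint8_t NF, uint8_t Masked, uint8_t Strided,
                                  uint8_t FF, uint8_t SEW, uint8_t LMUL) {
  auto Key = std::make_tuple(NF, Masked, Strided, FF, SEW, LMUL);
  auto RowKey = [](const VLSEGPseudo &P) {
    return std::make_tuple(P.NF, P.Masked, P.Strided, P.FF, P.SEW, P.LMUL);
  };
  // Binary search over the sorted table, then verify an exact key match.
  auto I = std::lower_bound(std::begin(VLSEGPseudos), std::end(VLSEGPseudos),
                            Key, [&](const VLSEGPseudo &P, decltype(Key) K) {
                              return RowKey(P) < K;
                            });
  if (I == std::end(VLSEGPseudos) || RowKey(*I) != Key)
    return nullptr;
  return I;
}
```

Keying on (NF, Masked, Strided, FF, SEW, LMUL) rather than on the intrinsic ID means one row can serve every intrinsic that resolves to the same pseudo shape, which is what allows the select functions below to drop their IntNo parameter.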
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -25,21 +25,15 @@
 
 #define DEBUG_TYPE "riscv-isel"
 
-namespace RISCVZvlssegTable {
-struct RISCVZvlsseg {
-  unsigned IntrinsicID;
-  uint8_t SEW;
-  uint8_t LMUL;
-  uint8_t IndexLMUL;
-  uint16_t Pseudo;
-};
-
-using namespace RISCV;
-
-#define GET_RISCVZvlssegTable_IMPL
+namespace llvm {
+namespace RISCV {
+#define GET_RISCVVSSEGTable_IMPL
+#define GET_RISCVVLSEGTable_IMPL
+#define GET_RISCVVLXSEGTable_IMPL
+#define GET_RISCVVSXSEGTable_IMPL
 #include "RISCVGenSearchableTables.inc"
-
-} // namespace RISCVZvlssegTable
+} // namespace RISCV
+} // namespace llvm
 
 void RISCVDAGToDAGISel::PostprocessISelDAG() {
   doPeepholeLoadStoreADDI();
@@ -184,7 +178,7 @@
   }
 }
 
-void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
+void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                     bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumValues() - 1;
@@ -210,9 +204,9 @@
   Operands.push_back(Node->getOperand(CurOp++)); // VL.
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
-  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
-      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+  const RISCV::VLSEGPseudo *P =
+      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
+                            static_cast<unsigned>(LMUL));
   SDNode *Load =
       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
   SDValue SuperReg = SDValue(Load, 0);
@@ -227,7 +221,6 @@
 
 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
   SDLoc DL(Node);
-  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
@@ -250,9 +243,9 @@
   Operands.push_back(Node->getOperand(CurOp++)); // VL.
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
-  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
-      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+  const RISCV::VLSEGPseudo *P =
+      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
+                            ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other,
                                         MVT::Glue, Operands);
   SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
@@ -269,8 +262,8 @@
   CurDAG->RemoveDeadNode(Node);
 }
 
-void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned IntNo,
-                                     bool IsMasked) {
+void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
+                                     bool IsOrdered) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumValues() - 1;
   MVT VT = Node->getSimpleValueType(0);
@@ -298,8 +291,8 @@
   RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
   unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
-  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
+      NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
   SDNode *Load =
       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
@@ -313,7 +306,7 @@
   CurDAG->RemoveDeadNode(Node);
 }
 
-void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
+void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                     bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 4;
@@ -339,16 +332,15 @@
   Operands.push_back(Node->getOperand(CurOp++)); // VL.
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
-  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
-      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
+      NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Store =
       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
   ReplaceNode(Node, Store);
 }
 
-void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned IntNo,
-                                     bool IsMasked) {
+void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
+                                     bool IsOrdered) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 5;
   if (IsMasked)
@@ -374,8 +366,8 @@
   RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
   unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
-  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
+      NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
   SDNode *Store =
       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
@@ -575,7 +567,7 @@
   case Intrinsic::riscv_vlseg6:
   case Intrinsic::riscv_vlseg7:
   case Intrinsic::riscv_vlseg8: {
-    selectVLSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ false);
+    selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
     return;
   }
   case Intrinsic::riscv_vlseg2_mask:
@@ -585,7 +577,7 @@
   case Intrinsic::riscv_vlseg6_mask:
   case Intrinsic::riscv_vlseg7_mask:
   case Intrinsic::riscv_vlseg8_mask: {
-    selectVLSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ false);
+    selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
     return;
   }
   case Intrinsic::riscv_vlsseg2:
@@ -595,7 +587,7 @@
   case Intrinsic::riscv_vlsseg6:
   case Intrinsic::riscv_vlsseg7:
   case Intrinsic::riscv_vlsseg8: {
-    selectVLSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ true);
+    selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
     return;
   }
   case Intrinsic::riscv_vlsseg2_mask:
@@ -605,7 +597,7 @@
   case Intrinsic::riscv_vlsseg6_mask:
   case Intrinsic::riscv_vlsseg7_mask:
   case Intrinsic::riscv_vlsseg8_mask: {
-    selectVLSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ true);
+    selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
     return;
   }
   case Intrinsic::riscv_vloxseg2:
@@ -615,16 +607,17 @@
   case Intrinsic::riscv_vloxseg6:
   case Intrinsic::riscv_vloxseg7:
   case Intrinsic::riscv_vloxseg8:
+    selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
+    return;
   case Intrinsic::riscv_vluxseg2:
   case Intrinsic::riscv_vluxseg3:
   case Intrinsic::riscv_vluxseg4:
   case Intrinsic::riscv_vluxseg5:
   case Intrinsic::riscv_vluxseg6:
   case Intrinsic::riscv_vluxseg7:
-  case Intrinsic::riscv_vluxseg8: {
-    selectVLXSEG(Node, IntNo, /*IsMasked*/ false);
+  case Intrinsic::riscv_vluxseg8:
+    selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
     return;
-  }
   case Intrinsic::riscv_vloxseg2_mask:
   case Intrinsic::riscv_vloxseg3_mask:
   case Intrinsic::riscv_vloxseg4_mask:
@@ -632,16 +625,17 @@
   case Intrinsic::riscv_vloxseg6_mask:
   case Intrinsic::riscv_vloxseg7_mask:
   case Intrinsic::riscv_vloxseg8_mask:
+    selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
+    return;
   case Intrinsic::riscv_vluxseg2_mask:
   case Intrinsic::riscv_vluxseg3_mask:
   case Intrinsic::riscv_vluxseg4_mask:
   case Intrinsic::riscv_vluxseg5_mask:
   case Intrinsic::riscv_vluxseg6_mask:
   case Intrinsic::riscv_vluxseg7_mask:
-  case Intrinsic::riscv_vluxseg8_mask: {
-    selectVLXSEG(Node, IntNo, /*IsMasked*/ true);
+  case Intrinsic::riscv_vluxseg8_mask:
+    selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
    return;
-  }
   case Intrinsic::riscv_vlseg8ff:
   case Intrinsic::riscv_vlseg7ff:
   case Intrinsic::riscv_vlseg6ff:
@@ -675,7 +669,7 @@
   case Intrinsic::riscv_vsseg6:
   case Intrinsic::riscv_vsseg7:
   case Intrinsic::riscv_vsseg8: {
-    selectVSSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ false);
+    selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
     return;
   }
   case Intrinsic::riscv_vsseg2_mask:
@@ -685,7 +679,7 @@
   case Intrinsic::riscv_vsseg6_mask:
   case Intrinsic::riscv_vsseg7_mask:
   case Intrinsic::riscv_vsseg8_mask: {
-    selectVSSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ false);
+    selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
     return;
   }
   case Intrinsic::riscv_vssseg2:
@@ -695,7 +689,7 @@
   case Intrinsic::riscv_vssseg6:
   case Intrinsic::riscv_vssseg7:
   case Intrinsic::riscv_vssseg8: {
-    selectVSSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ true);
+    selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
     return;
   }
   case Intrinsic::riscv_vssseg2_mask:
@@ -705,7 +699,7 @@
   case Intrinsic::riscv_vssseg6_mask:
   case Intrinsic::riscv_vssseg7_mask:
   case Intrinsic::riscv_vssseg8_mask: {
-    selectVSSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ true);
+    selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
     return;
   }
   case Intrinsic::riscv_vsoxseg2:
@@ -715,16 +709,17 @@
   case Intrinsic::riscv_vsoxseg6:
   case Intrinsic::riscv_vsoxseg7:
   case Intrinsic::riscv_vsoxseg8:
+    selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
+    return;
   case Intrinsic::riscv_vsuxseg2:
   case Intrinsic::riscv_vsuxseg3:
   case Intrinsic::riscv_vsuxseg4:
   case Intrinsic::riscv_vsuxseg5:
   case Intrinsic::riscv_vsuxseg6:
   case Intrinsic::riscv_vsuxseg7:
-  case Intrinsic::riscv_vsuxseg8: {
-    selectVSXSEG(Node, IntNo, /*IsMasked*/ false);
+  case Intrinsic::riscv_vsuxseg8:
+    selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
     return;
-  }
   case Intrinsic::riscv_vsoxseg2_mask:
   case Intrinsic::riscv_vsoxseg3_mask:
   case Intrinsic::riscv_vsoxseg4_mask:
@@ -732,17 +727,18 @@
   case Intrinsic::riscv_vsoxseg6_mask:
   case Intrinsic::riscv_vsoxseg7_mask:
   case Intrinsic::riscv_vsoxseg8_mask:
+    selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
+    return;
   case Intrinsic::riscv_vsuxseg2_mask:
   case Intrinsic::riscv_vsuxseg3_mask:
   case Intrinsic::riscv_vsuxseg4_mask:
   case Intrinsic::riscv_vsuxseg5_mask:
   case Intrinsic::riscv_vsuxseg6_mask:
   case Intrinsic::riscv_vsuxseg7_mask:
-  case Intrinsic::riscv_vsuxseg8_mask: {
-    selectVSXSEG(Node, IntNo, /*IsMasked*/ true);
+  case Intrinsic::riscv_vsuxseg8_mask:
+    selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
     return;
   }
-  }
   break;
 }
 case ISD::BITCAST:
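With IntNo gone, each select function above recovers NF from the shape of the SDNode itself. The sketch below restates that counting in plain C++ (not LLVM's SDNode API); the operand layouts follow the expressions visible in this patch, and the struct and helper names are invented for illustration.

```cpp
#include <cstdio>

// Sketch (not LLVM API): how the rewritten select functions derive NF from
// node shape alone, mirroring the expressions in the patch above.
struct NodeShape {
  unsigned NumValues;   // results, including the chain (and VL for FF loads)
  unsigned NumOperands; // operands, including the chain and intrinsic ID
};

// vlseg/vlsseg/vlxseg: NF vector results plus the chain.
unsigned segLoadNF(const NodeShape &N) { return N.NumValues - 1; }

// The fault-only-first forms additionally return the post-fault VL.
unsigned segLoadFFNF(const NodeShape &N) { return N.NumValues - 2; }

// Stores: chain + intrinsic ID + base pointer + VL are the four operands
// always present; strided/indexed forms add one operand (the stride or the
// index vector), and masked forms add the mask operand.
unsigned segStoreNF(const NodeShape &N, bool Masked, bool StridedOrIndexed) {
  return N.NumOperands - 4 - (StridedOrIndexed ? 1 : 0) - (Masked ? 1 : 0);
}

int main() {
  // A masked vsoxseg4 store: chain, ID, 4 values, pointer, index, mask, VL.
  NodeShape Store{/*NumValues=*/1, /*NumOperands=*/10};
  std::printf("NF = %u\n",
              segStoreNF(Store, /*Masked=*/true, /*StridedOrIndexed=*/true));
  return 0; // prints NF = 4
}
```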
"getVLSEGPseudo"; +} + +class RISCVVLXSEG N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> { + bits<4> NF = N; + bits<1> Masked = M; + bits<1> Ordered = O; bits<7> SEW = S; bits<3> LMUL = L; bits<3> IndexLMUL = IL; Pseudo Pseudo = !cast(NAME); } -def RISCVZvlssegTable : GenericTable { - let FilterClass = "RISCVZvlsseg"; - let Fields = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL", "Pseudo"]; - let PrimaryKey = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL"]; - let PrimaryKeyName = "getPseudo"; +def RISCVVLXSEGTable : GenericTable { + let FilterClass = "RISCVVLXSEG"; + let CppTypeName = "VLXSEGPseudo"; + let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"]; + let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"]; + let PrimaryKeyName = "getVLXSEGPseudo"; +} + +class RISCVVSSEG N, bit M, bit Str, bits<7> S, bits<3> L> { + bits<4> NF = N; + bits<1> Masked = M; + bits<1> Strided = Str; + bits<7> SEW = S; + bits<3> LMUL = L; + Pseudo Pseudo = !cast(NAME); +} + +def RISCVVSSEGTable : GenericTable { + let FilterClass = "RISCVVSSEG"; + let CppTypeName = "VSSEGPseudo"; + let Fields = ["NF", "Masked", "Strided", "SEW", "LMUL", "Pseudo"]; + let PrimaryKey = ["NF", "Masked", "Strided", "SEW", "LMUL"]; + let PrimaryKeyName = "getVSSEGPseudo"; +} + +class RISCVVSXSEG N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> { + bits<4> NF = N; + bits<1> Masked = M; + bits<1> Ordered = O; + bits<7> SEW = S; + bits<3> LMUL = L; + bits<3> IndexLMUL = IL; + Pseudo Pseudo = !cast(NAME); +} + +def RISCVVSXSEGTable : GenericTable { + let FilterClass = "RISCVVSXSEG"; + let CppTypeName = "VSXSEGPseudo"; + let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"]; + let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"]; + let PrimaryKeyName = "getVSXSEGPseudo"; } //===----------------------------------------------------------------------===// @@ -455,35 +511,6 @@ !subst("Pseudo", "", PseudoInst)))))))))))))))))))); } -class ToLowerCase { - string L = !subst("FF", "ff", - !subst("VLSEG", "vlseg", - !subst("VLSSEG", "vlsseg", - !subst("VSSEG", "vsseg", - !subst("VSSSEG", "vssseg", - !subst("VLOXSEG", "vloxseg", - !subst("VLUXSEG", "vluxseg", - !subst("VSOXSEG", "vsoxseg", - !subst("VSUXSEG", "vsuxseg", Upper))))))))); -} - -// Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2 -// Example: PseudoVLSEG2E32_V_M2_MASK -> int_riscv_vlseg2_mask -class PseudoToIntrinsic { - string Intrinsic = !strconcat("int_riscv_", - ToLowerCase< - !subst("E8", "", - !subst("E16", "", - !subst("E32", "", - !subst("E64", "", - !subst("EI8", "", - !subst("EI16", "", - !subst("EI32", "", - !subst("EI64", "", - !subst("_V", "", PseudoToVInst.VInst)))))))))>.L, - !if(IsMasked, "_mask", "")); -} - // The destination vector register group for a masked vector instruction cannot // overlap the source mask register (v0), unless the destination vector register // is being written with a mask value (e.g., comparisons) or the scalar result @@ -1013,11 +1040,11 @@ defm "EI" # eew : VPseudoAMOEI; } -class VPseudoUSSegLoadNoMask EEW>: +class VPseudoUSSegLoadNoMask EEW, bits<4> NF, bit isFF>: Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1029,12 +1056,12 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoUSSegLoadMask EEW>: +class VPseudoUSSegLoadMask EEW, bits<4> NF, bit isFF>: Pseudo<(outs GetVRegNoV0.R:$rd), 
(ins GetVRegNoV0.R:$merge, GPR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1047,11 +1074,12 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoSSegLoadNoMask EEW>: +class VPseudoSSegLoadNoMask EEW, bits<4> NF>: Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, GPR:$offset, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVLSEG { + let mayLoad = 1; let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1063,12 +1091,12 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoSSegLoadMask EEW>: +class VPseudoSSegLoadMask EEW, bits<4> NF>: Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, GPR:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1081,11 +1109,12 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoISegLoadNoMask EEW, bits<3> LMUL>: +class VPseudoISegLoadNoMask EEW, bits<3> LMUL, + bits<4> NF, bit Ordered>: Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, IdxClass:$offset, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul, LMUL> { + RISCVVLXSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1100,12 +1129,13 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoISegLoadMask EEW, bits<3> LMUL>: +class VPseudoISegLoadMask EEW, bits<3> LMUL, + bits<4> NF, bit Ordered>: Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, IdxClass:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul, LMUL> { + RISCVVLXSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1120,11 +1150,11 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoUSSegStoreNoMask EEW>: +class VPseudoUSSegStoreNoMask EEW, bits<4> NF>: Pseudo<(outs), (ins ValClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; @@ -1136,12 +1166,12 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoUSSegStoreMask EEW>: +class VPseudoUSSegStoreMask EEW, bits<4> NF>: Pseudo<(outs), (ins ValClass:$rd, GPR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; @@ -1152,11 +1182,11 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoSSegStoreNoMask EEW>: +class VPseudoSSegStoreNoMask EEW, bits<4> NF>: Pseudo<(outs), (ins ValClass:$rd, GPR:$rs1, GPR: $offset, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; @@ -1168,12 +1198,12 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoSSegStoreMask EEW>: +class VPseudoSSegStoreMask EEW, bits<4> NF>: Pseudo<(outs), (ins ValClass:$rd, GPR:$rs1, GPR: $offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul> { + RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; @@ -1184,12 +1214,13 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoISegStoreNoMask EEW, bits<3> LMUL>: +class VPseudoISegStoreNoMask EEW, bits<3> LMUL, + bits<4> NF, bit Ordered>: Pseudo<(outs), (ins 
ValClass:$rd, GPR:$rs1, IdxClass: $index, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul, LMUL> { + RISCVVSXSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; @@ -1201,12 +1232,13 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoISegStoreMask EEW, bits<3> LMUL>: +class VPseudoISegStoreMask EEW, bits<3> LMUL, + bits<4> NF, bit Ordered>: Pseudo<(outs), (ins ValClass:$rd, GPR:$rs1, IdxClass: $index, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVZvlsseg.Intrinsic, EEW, VLMul, LMUL> { + RISCVVSXSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; @@ -1889,9 +1921,9 @@ defvar vreg = SegRegClass.RC; defvar FFStr = !if(isFF, "FF", ""); def nf # "E" # eew # FFStr # "_V_" # LInfo : - VPseudoUSSegLoadNoMask; + VPseudoUSSegLoadNoMask; def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" : - VPseudoUSSegLoadMask; + VPseudoUSSegLoadMask; } } } @@ -1905,15 +1937,15 @@ let VLMul = lmul.value in { foreach nf = NFSet.L in { defvar vreg = SegRegClass.RC; - def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask; - def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask; + def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask; + def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask; } } } } } -multiclass VPseudoISegLoad { +multiclass VPseudoISegLoad { foreach idx_eew = EEWList in { // EEW for index argument. foreach idx_lmul = MxSet.m in { // LMUL for index argument. foreach val_lmul = MxList.m in { // LMUL for the value. @@ -1924,9 +1956,9 @@ foreach nf = NFSet.L in { defvar ValVreg = SegRegClass.RC; def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo : - VPseudoISegLoadNoMask; + VPseudoISegLoadNoMask; def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" : - VPseudoISegLoadMask; + VPseudoISegLoadMask; } } } @@ -1941,8 +1973,8 @@ let VLMul = lmul.value in { foreach nf = NFSet.L in { defvar vreg = SegRegClass.RC; - def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask; - def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask; + def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask; + def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask; } } } @@ -1956,15 +1988,15 @@ let VLMul = lmul.value in { foreach nf = NFSet.L in { defvar vreg = SegRegClass.RC; - def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask; - def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask; + def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask; + def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask; } } } } } -multiclass VPseudoISegStore { +multiclass VPseudoISegStore { foreach idx_eew = EEWList in { // EEW for index argument. foreach idx_lmul = MxSet.m in { // LMUL for index argument. foreach val_lmul = MxList.m in { // LMUL for the value. 
@@ -1975,9 +2007,9 @@ foreach nf = NFSet.L in { defvar ValVreg = SegRegClass.RC; def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo : - VPseudoISegStoreNoMask; + VPseudoISegStoreNoMask; def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" : - VPseudoISegStoreMask; + VPseudoISegStoreMask; } } } @@ -3235,12 +3267,12 @@ //===----------------------------------------------------------------------===// defm PseudoVLSEG : VPseudoUSSegLoad; defm PseudoVLSSEG : VPseudoSSegLoad; -defm PseudoVLOXSEG : VPseudoISegLoad; -defm PseudoVLUXSEG : VPseudoISegLoad; +defm PseudoVLOXSEG : VPseudoISegLoad; +defm PseudoVLUXSEG : VPseudoISegLoad; defm PseudoVSSEG : VPseudoUSSegStore; defm PseudoVSSSEG : VPseudoSSegStore; -defm PseudoVSOXSEG : VPseudoISegStore; -defm PseudoVSUXSEG : VPseudoISegStore; +defm PseudoVSOXSEG : VPseudoISegStore; +defm PseudoVSUXSEG : VPseudoISegStore; // vlsegeff.v may update VL register let hasSideEffects = 1, Defs = [VL] in
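End to end, each segment pseudo now carries its own table row through the RISCVVLSEG/RISCVVLXSEG/RISCVVSSEG/RISCVVSXSEG mixins, so the C++ side goes from structural properties to an opcode without any string mangling of pseudo names. A worked example of the key the selector would build for a masked vluxseg4ei16.v with i32 data at LMUL=2; the enum encodings here are placeholders, not RISCV.h's actual values, and the expanded pseudo name is my reading of the def string above.

```cpp
#include <cstdint>
#include <cstdio>

enum VLMUL : uint8_t { LMUL_1 = 0, LMUL_2 = 1 }; // placeholder encodings

int main() {
  unsigned NF = 4;          // four fields per segment
  bool Masked = true;
  bool Ordered = false;     // vluxseg* is unordered; vloxseg* would pass true
  unsigned IndexEEW = 16;   // the SEW key of the indexed tables is the index EEW
  VLMUL DataLMUL = LMUL_2;  // <vscale x 4 x i32> data: 4 x 32 bits = 2 x 64-bit
  VLMUL IndexLMUL = LMUL_1; // <vscale x 4 x i16> index: 4 x 16 bits = 1 x 64-bit
  // The selector would call, in effect:
  //   RISCV::getVLXSEGPseudo(NF, Masked, Ordered, IndexEEW, DataLMUL, IndexLMUL)
  // and the returned row's Pseudo field should name
  // PseudoVLUXSEG4EI16_V_M1_M2_MASK, expanding nf # "EI" # idx_eew # "_V_" #
  // IdxLInfo # "_" # ValLInfo # "_MASK" from the multiclass above.
  std::printf("key = {%u, %d, %d, %u, %u, %u}\n", NF, (int)Masked,
              (int)Ordered, IndexEEW, (unsigned)DataLMUL, (unsigned)IndexLMUL);
  return 0;
}
```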