diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -126,10 +126,21 @@
   uint16_t Pseudo;
 };
 
+struct VLX_VSXPseudo {
+  uint8_t Masked;
+  uint8_t Ordered;
+  uint8_t SEW;
+  uint8_t LMUL;
+  uint8_t IndexLMUL;
+  uint16_t Pseudo;
+};
+
 #define GET_RISCVVSSEGTable_DECL
 #define GET_RISCVVLSEGTable_DECL
 #define GET_RISCVVLXSEGTable_DECL
 #define GET_RISCVVSXSEGTable_DECL
+#define GET_RISCVVLXTable_DECL
+#define GET_RISCVVSXTable_DECL
 #include "RISCVGenSearchableTables.inc"
 
 } // namespace RISCV
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -31,6 +31,8 @@
 #define GET_RISCVVLSEGTable_IMPL
 #define GET_RISCVVLXSEGTable_IMPL
 #define GET_RISCVVSXSEGTable_IMPL
+#define GET_RISCVVLXTable_IMPL
+#define GET_RISCVVSXTable_IMPL
 #include "RISCVGenSearchableTables.inc"
 } // namespace RISCV
 } // namespace llvm
@@ -656,6 +658,50 @@
       selectVLSEGFF(Node, /*IsMasked*/ true);
       return;
     }
+    case Intrinsic::riscv_vloxei:
+    case Intrinsic::riscv_vloxei_mask:
+    case Intrinsic::riscv_vluxei:
+    case Intrinsic::riscv_vluxei_mask: {
+      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
+                      IntNo == Intrinsic::riscv_vluxei_mask;
+      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
+                       IntNo == Intrinsic::riscv_vloxei_mask;
+
+      SDLoc DL(Node);
+      MVT VT = Node->getSimpleValueType(0);
+      unsigned ScalarSize = VT.getScalarSizeInBits();
+      MVT XLenVT = Subtarget->getXLenVT();
+      SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+
+      unsigned CurOp = 2;
+      SmallVector<SDValue, 8> Operands;
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++)); // MaskedOff operand.
+      Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+      Operands.push_back(Node->getOperand(CurOp++)); // Index.
+      MVT IndexVT = Operands.back()->getSimpleValueType(0);
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+      SDValue VL;
+      selectVLOp(Node->getOperand(CurOp++), VL);
+      Operands.push_back(VL);
+      Operands.push_back(SEW);
+      Operands.push_back(Node->getOperand(0)); // Chain.
+
+      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+             "Element count mismatch");
+
+      RISCVVLMUL LMUL = getLMUL(VT);
+      RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+      unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
+          IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+          static_cast<unsigned>(IndexLMUL));
+      SDNode *Load =
+          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+      ReplaceNode(Node, Load);
+      return;
+    }
     }
     break;
   }
@@ -738,6 +784,49 @@
     case Intrinsic::riscv_vsuxseg8_mask:
       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
       return;
+    case Intrinsic::riscv_vsoxei:
+    case Intrinsic::riscv_vsoxei_mask:
+    case Intrinsic::riscv_vsuxei:
+    case Intrinsic::riscv_vsuxei_mask: {
+      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
+                      IntNo == Intrinsic::riscv_vsuxei_mask;
+      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
+                       IntNo == Intrinsic::riscv_vsoxei_mask;
+
+      SDLoc DL(Node);
+      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
+      unsigned ScalarSize = VT.getScalarSizeInBits();
+      MVT XLenVT = Subtarget->getXLenVT();
+      SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+
+      unsigned CurOp = 2;
+      SmallVector<SDValue, 8> Operands;
+      Operands.push_back(Node->getOperand(CurOp++)); // Store value.
+      Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+      Operands.push_back(Node->getOperand(CurOp++)); // Index.
+      MVT IndexVT = Operands.back()->getSimpleValueType(0);
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+      SDValue VL;
+      selectVLOp(Node->getOperand(CurOp++), VL);
+      Operands.push_back(VL);
+      Operands.push_back(SEW);
+      Operands.push_back(Node->getOperand(0)); // Chain.
+
+      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+             "Element count mismatch");
+
+      RISCVVLMUL LMUL = getLMUL(VT);
+      RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+      unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+      const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
+          IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+          static_cast<unsigned>(IndexLMUL));
+      SDNode *Store =
+          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+      ReplaceNode(Node, Store);
+      return;
+    }
     }
     break;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -413,6 +413,40 @@
   let PrimaryKeyName = "getRISCVVIntrinsicInfo";
 }
 
+class RISCVVLX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+  bits<1> Masked = M;
+  bits<1> Ordered = O;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  bits<3> IndexLMUL = IL;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVLXTable : GenericTable {
+  let FilterClass = "RISCVVLX";
+  let CppTypeName = "VLX_VSXPseudo";
+  let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+  let PrimaryKeyName = "getVLXPseudo";
+}
+
+class RISCVVSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+  bits<1> Masked = M;
+  bits<1> Ordered = O;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  bits<3> IndexLMUL = IL;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVSXTable : GenericTable {
+  let FilterClass = "RISCVVSX";
+  let CppTypeName = "VLX_VSXPseudo";
+  let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+  let PrimaryKeyName = "getVSXPseudo";
+}
+
 class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
   bits<4> NF = N;
   bits<1> Masked = M;
@@ -616,10 +650,12 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoILoadNoMask:
+class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                         bit Ordered>:
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -631,12 +667,14 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoILoadMask:
+class VPseudoILoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                       bit Ordered>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPR:$rs1, IdxClass:$rs2,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -877,10 +915,12 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoIStoreNoMask:
+class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                          bit Ordered>:
       Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
@@ -892,10 +932,12 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoIStoreMask:
+class VPseudoIStoreMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                        bit Ordered>:
       Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
@@ -1284,7 +1326,7 @@
   }
 }
 
-multiclass VPseudoILoad {
+multiclass VPseudoILoad<bit Ordered> {
   foreach eew = EEWList in {
     foreach lmul = MxList.m in
     foreach idx_lmul = MxSet<eew>.m in {
@@ -1293,8 +1335,10 @@
       defvar IdxLInfo = idx_lmul.MX;
       defvar IdxVreg = idx_lmul.vrclass;
       let VLMul = lmul.value in {
-        def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo : VPseudoILoadNoMask<Vreg, IdxVreg>;
-        def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoILoadMask<Vreg, IdxVreg>;
+        def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
+          VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
+        def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
+          VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
       }
     }
   }
@@ -1334,7 +1378,7 @@
   }
 }
 
-multiclass VPseudoIStore {
+multiclass VPseudoIStore<bit Ordered> {
   foreach eew = EEWList in {
     foreach lmul = MxList.m in
     foreach idx_lmul = MxSet<eew>.m in {
@@ -1344,9 +1388,9 @@
       defvar IdxVreg = idx_lmul.vrclass;
       let VLMul = lmul.value in {
         def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
-          VPseudoIStoreNoMask<Vreg, IdxVreg>;
+          VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
         def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
-          VPseudoIStoreMask<Vreg, IdxVreg>;
+          VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
       }
     }
   }
@@ -3249,10 +3293,10 @@
 
 //===----------------------------------------------------------------------===//
 // Vector Indexed Loads and Stores
-defm PseudoVLUX : VPseudoILoad;
-defm PseudoVLOX : VPseudoILoad;
-defm PseudoVSOX : VPseudoIStore;
-defm PseudoVSUX : VPseudoIStore;
+defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>;
+defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>;
+defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>;
+defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>;
 
 //===----------------------------------------------------------------------===//
 // 7.7. Unit-stride Fault-Only-First Loads
@@ -3830,45 +3874,6 @@
                             vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
 }
 
-//===----------------------------------------------------------------------===//
-// 7.6 Vector Indexed Instructions
-//===----------------------------------------------------------------------===//
-
-foreach vti = AllVectors in
-foreach eew = EEWList in {
-  defvar vlmul = vti.LMul;
-  defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
-  defvar log_sew = shift_amount<vti.SEW>.val;
-  // The data vector register group has EEW=SEW, EMUL=LMUL, while the offset
-  // vector register group has EEW encoding in the instruction and EMUL=(EEW/SEW)*LMUL.
-  // calculate octuple elmul which is (eew * octuple_lmul) >> log_sew
-  defvar octuple_elmul = !srl(!mul(eew, octuple_lmul), log_sew);
-  // legal octuple elmul should be more than 0 and less than equal 64
-  if !gt(octuple_elmul, 0) then {
-    if !le(octuple_elmul, 64) then {
-      defvar elmul_str = octuple_to_str<octuple_elmul>.ret;
-      defvar elmul = !cast<LMULInfo>("V_" # elmul_str);
-      defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);
-
-      defm : VPatILoad<"int_riscv_vluxei",
-                       "PseudoVLUXEI"#eew,
-                       vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                       vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-      defm : VPatILoad<"int_riscv_vloxei",
-                       "PseudoVLOXEI"#eew,
-                       vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                       vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-      defm : VPatIStore<"int_riscv_vsoxei",
-                        "PseudoVSOXEI"#eew,
-                        vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                        vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-      defm : VPatIStore<"int_riscv_vsuxei",
-                        "PseudoVSUXEI"#eew,
-                        vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                        vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-    }
-  }
-}
 } // Predicates = [HasStdExtV]
 
 //===----------------------------------------------------------------------===//
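
For reference, the two GenericTable defs above make TableGen's SearchableTables backend emit getVLXPseudo()/getVSXPseudo() functions into RISCVGenSearchableTables.inc that binary-search a table keyed on (Masked, Ordered, SEW, LMUL, IndexLMUL), where SEW is the element width of the index operand (IndexScalarSize in the C++ above) and LMUL/IndexLMUL describe the data and index register groups. The following is a self-contained sketch of that lookup shape, not the generated code: the opcode constants, the vlmul encoding values, and the two table rows are invented for illustration.

// Sketch of the generated lookup. Everything below is illustrative; the real
// table rows and search function are emitted by llvm-tblgen.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <tuple>

struct VLX_VSXPseudo {
  uint8_t Masked;    // 1 for the _MASK pseudo variants.
  uint8_t Ordered;   // 1 for vlox*/vsox*, 0 for vlux*/vsux*.
  uint8_t SEW;       // EEW of the index operand.
  uint8_t LMUL;      // LMUL of the data register group.
  uint8_t IndexLMUL; // EMUL of the index register group.
  uint16_t Pseudo;   // Opcode of the pseudo instruction to emit.
};

// Invented stand-ins for real pseudo opcodes and the 3-bit vlmul encoding.
enum : uint16_t { VLUXEI16_V_MF2_M1 = 100, VLOXEI16_V_MF2_M1_MASK = 101 };
enum : uint8_t { LMUL_M1 = 0, LMUL_MF2 = 7 };

using KeyT = std::tuple<uint8_t, uint8_t, uint8_t, uint8_t, uint8_t>;

static KeyT keyOf(const VLX_VSXPseudo &P) {
  return std::make_tuple(P.Masked, P.Ordered, P.SEW, P.LMUL, P.IndexLMUL);
}

// Rows are sorted on the primary key so std::lower_bound can find them,
// mirroring how the SearchableTables backend emits its lookup.
static const VLX_VSXPseudo Table[] = {
    {0, 0, 16, LMUL_M1, LMUL_MF2, VLUXEI16_V_MF2_M1},
    {1, 1, 16, LMUL_M1, LMUL_MF2, VLOXEI16_V_MF2_M1_MASK},
};

static const VLX_VSXPseudo *getVLXPseudo(uint8_t Masked, uint8_t Ordered,
                                         uint8_t SEW, uint8_t LMUL,
                                         uint8_t IndexLMUL) {
  KeyT Key = std::make_tuple(Masked, Ordered, SEW, LMUL, IndexLMUL);
  const VLX_VSXPseudo *It = std::lower_bound(
      std::begin(Table), std::end(Table), Key,
      [](const VLX_VSXPseudo &P, const KeyT &K) { return keyOf(P) < K; });
  if (It == std::end(Table) || keyOf(*It) != Key)
    return nullptr;
  return It;
}

int main() {
  // An unmasked, unordered indexed load of M1 data whose index vector has
  // EEW=16 and EMUL=1/2 resolves to the EI16_V_MF2_M1 pseudo.
  const VLX_VSXPseudo *P = getVLXPseudo(/*Masked*/ 0, /*Ordered*/ 0, /*SEW*/ 16,
                                        LMUL_M1, LMUL_MF2);
  assert(P && P->Pseudo == VLUXEI16_V_MF2_M1);
  printf("selected pseudo opcode: %u\n", static_cast<unsigned>(P->Pseudo));
  return 0;
}

Note that the selection code in the patch dereferences the returned pointer unchecked before handing P->Pseudo to CurDAG->getMachineNode(), so the intrinsic operands must always describe a (SEW, LMUL, IndexLMUL) combination for which the multiclasses above actually instantiated a pseudo.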