diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -126,6 +126,23 @@
   uint16_t Pseudo;
 };
 
+struct VLEPseudo {
+  uint8_t Masked;
+  uint8_t Strided;
+  uint8_t FF;
+  uint8_t SEW;
+  uint8_t LMUL;
+  uint16_t Pseudo;
+};
+
+struct VSEPseudo {
+  uint8_t Masked;
+  uint8_t Strided;
+  uint8_t SEW;
+  uint8_t LMUL;
+  uint16_t Pseudo;
+};
+
 struct VLX_VSXPseudo {
   uint8_t Masked;
   uint8_t Ordered;
@@ -139,6 +156,8 @@
 #define GET_RISCVVLSEGTable_DECL
 #define GET_RISCVVLXSEGTable_DECL
 #define GET_RISCVVSXSEGTable_DECL
+#define GET_RISCVVLETable_DECL
+#define GET_RISCVVSETable_DECL
 #define GET_RISCVVLXTable_DECL
 #define GET_RISCVVSXTable_DECL
 #include "RISCVGenSearchableTables.inc"
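Note: the two structs above mirror rows of TableGen-generated search tables; the declarations here pull the generated getVLEPseudo/getVSEPseudo lookups out of RISCVGenSearchableTables.inc. The following standalone sketch (not part of the patch) illustrates the keyed lookup those tables enable; the rows, LMUL encodings, and opcode numbers are invented for illustration, and the real lookup is generated, not hand-written.

// Standalone sketch of a searchable-table lookup; illustrative values only.
#include <cstdint>
#include <tuple>
#include <vector>

struct VLEPseudo {
  uint8_t Masked;
  uint8_t Strided;
  uint8_t FF;
  uint8_t SEW;
  uint8_t LMUL;
  uint16_t Pseudo; // Opcode of the pseudo instruction.
};

// Two mock rows; the generated table holds one row per defined pseudo and is
// sorted on the primary key (Masked, Strided, FF, SEW, LMUL).
static const std::vector<VLEPseudo> Table = {
    {/*Masked*/ 0, /*Strided*/ 0, /*FF*/ 0, /*SEW*/ 32, /*LMUL*/ 0, 1000},
    {/*Masked*/ 1, /*Strided*/ 0, /*FF*/ 0, /*SEW*/ 32, /*LMUL*/ 0, 1001},
};

const VLEPseudo *getVLEPseudo(uint8_t Masked, uint8_t Strided, uint8_t FF,
                              uint8_t SEW, uint8_t LMUL) {
  for (const VLEPseudo &Row : Table)
    if (std::tie(Row.Masked, Row.Strided, Row.FF, Row.SEW, Row.LMUL) ==
        std::tie(Masked, Strided, FF, SEW, LMUL))
      return &Row; // The generated version binary-searches instead.
  return nullptr;  // No pseudo defined for this key.
}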
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -32,6 +32,8 @@
 #define GET_RISCVVLSEGTable_IMPL
 #define GET_RISCVVLXSEGTable_IMPL
 #define GET_RISCVVSXSEGTable_IMPL
+#define GET_RISCVVLETable_IMPL
+#define GET_RISCVVSETable_IMPL
 #define GET_RISCVVLXTable_IMPL
 #define GET_RISCVVSXTable_IMPL
 #include "RISCVGenSearchableTables.inc"
@@ -646,6 +648,94 @@
       ReplaceNode(Node, Load);
       return;
     }
+    case Intrinsic::riscv_vle1:
+    case Intrinsic::riscv_vle:
+    case Intrinsic::riscv_vle_mask:
+    case Intrinsic::riscv_vlse:
+    case Intrinsic::riscv_vlse_mask: {
+      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
+                      IntNo == Intrinsic::riscv_vlse_mask;
+      bool IsStrided =
+          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
+
+      SDLoc DL(Node);
+      MVT VT = Node->getSimpleValueType(0);
+      unsigned ScalarSize = VT.getScalarSizeInBits();
+      MVT XLenVT = Subtarget->getXLenVT();
+      // VLE1 uses an SEW of 8.
+      unsigned SEWImm = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
+      SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
+
+      unsigned CurOp = 2;
+      SmallVector<SDValue, 8> Operands;
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++));
+      Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+      if (IsStrided)
+        Operands.push_back(Node->getOperand(CurOp++)); // Stride.
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+      SDValue VL;
+      selectVLOp(Node->getOperand(CurOp++), VL);
+      Operands.push_back(VL);
+      Operands.push_back(SEW);
+      Operands.push_back(Node->getOperand(0)); // Chain.
+
+      RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      const RISCV::VLEPseudo *P =
+          RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, ScalarSize,
+                              static_cast<unsigned>(LMUL));
+      MachineSDNode *Load =
+          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+
+      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+      ReplaceNode(Node, Load);
+      return;
+    }
+    case Intrinsic::riscv_vleff:
+    case Intrinsic::riscv_vleff_mask: {
+      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
+
+      SDLoc DL(Node);
+      MVT VT = Node->getSimpleValueType(0);
+      unsigned ScalarSize = VT.getScalarSizeInBits();
+      MVT XLenVT = Subtarget->getXLenVT();
+      SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+
+      unsigned CurOp = 2;
+      SmallVector<SDValue, 8> Operands;
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++));
+      Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+      SDValue VL;
+      selectVLOp(Node->getOperand(CurOp++), VL);
+      Operands.push_back(VL);
+      Operands.push_back(SEW);
+      Operands.push_back(Node->getOperand(0)); // Chain.
+
+      RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      const RISCV::VLEPseudo *P =
+          RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
+                              ScalarSize, static_cast<unsigned>(LMUL));
+      MachineSDNode *Load =
+          CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
+                                 MVT::Other, MVT::Glue, Operands);
+      SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
+                                              /*Glue*/ SDValue(Load, 2));
+
+      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+      ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
+      ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
+      ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
+      CurDAG->RemoveDeadNode(Node);
+      return;
+    }
     }
     break;
   }
@@ -775,6 +865,50 @@
       ReplaceNode(Node, Store);
       return;
     }
+    case Intrinsic::riscv_vse1:
+    case Intrinsic::riscv_vse:
+    case Intrinsic::riscv_vse_mask:
+    case Intrinsic::riscv_vsse:
+    case Intrinsic::riscv_vsse_mask: {
+      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
+                      IntNo == Intrinsic::riscv_vsse_mask;
+      bool IsStrided =
+          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
+
+      SDLoc DL(Node);
+      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
+      unsigned ScalarSize = VT.getScalarSizeInBits();
+      MVT XLenVT = Subtarget->getXLenVT();
+      // VSE1 uses an SEW of 8.
+      unsigned SEWImm = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
+      SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
+
+      unsigned CurOp = 2;
+      SmallVector<SDValue, 8> Operands;
+      Operands.push_back(Node->getOperand(CurOp++)); // Store value.
+      Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+      if (IsStrided)
+        Operands.push_back(Node->getOperand(CurOp++)); // Stride.
+      if (IsMasked)
+        Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+      SDValue VL;
+      selectVLOp(Node->getOperand(CurOp++), VL);
+      Operands.push_back(VL);
+      Operands.push_back(SEW);
+      Operands.push_back(Node->getOperand(0)); // Chain.
+
+      RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
+          IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
+      MachineSDNode *Store =
+          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+
+      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
+
+      ReplaceNode(Node, Store);
+      return;
+    }
     }
     break;
   }
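Note: all of the new cases build their machine-node operands in one fixed order: optional merge (tied pass-thru, masked forms only), base pointer, optional stride, optional mask, VL, SEW immediate, chain; stores lead with the store value and have no merge operand. The following plain C++ sketch (not LLVM code; every name is illustrative) restates just that ordering logic for the load cases and can be compiled and run on its own.

// Mirrors the operand order assembled by the new custom isel cases above.
#include <iostream>
#include <string>
#include <vector>

static std::vector<std::string> buildLoadOperands(bool IsMasked,
                                                  bool IsStrided) {
  std::vector<std::string> Ops;
  if (IsMasked)
    Ops.push_back("merge"); // Pass-thru for masked-off elements.
  Ops.push_back("base");    // Base pointer.
  if (IsStrided)
    Ops.push_back("stride"); // Byte stride (VLSE only).
  if (IsMasked)
    Ops.push_back("mask");   // Mask register operand.
  Ops.push_back("vl");       // VL operand chosen by selectVLOp.
  Ops.push_back("sew");      // SEW immediate.
  Ops.push_back("chain");    // Chain token.
  return Ops;
}

int main() {
  for (bool Masked : {false, true})
    for (bool Strided : {false, true}) {
      std::cout << (Masked ? "masked" : "unmasked") << ' '
                << (Strided ? "strided:" : "unit-stride:");
      for (const std::string &Op : buildLoadOperands(Masked, Strided))
        std::cout << ' ' << Op;
      std::cout << '\n';
    }
  return 0;
}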
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -107,9 +107,6 @@
   READ_VLENB,
   // Truncates a RVV integer vector by one power-of-two.
   TRUNCATE_VECTOR,
-  // Unit-stride fault-only-first load
-  VLEFF,
-  VLEFF_MASK,
   // Matches the semantics of vslideup/vslidedown. The first operand is the
   // pass-thru operand, the second is the source vector, the third is the
   // XLenVT index (either constant or non-constant), the fourth is the mask
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -805,22 +805,26 @@
 }
 
 RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
-  switch (VT.getSizeInBits().getKnownMinValue() / 8) {
+  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
+  if (VT.getVectorElementType() == MVT::i1)
+    KnownSize *= 8;
+
+  switch (KnownSize) {
   default:
     llvm_unreachable("Invalid LMUL.");
-  case 1:
+  case 8:
     return RISCVVLMUL::LMUL_F8;
-  case 2:
+  case 16:
     return RISCVVLMUL::LMUL_F4;
-  case 4:
+  case 32:
     return RISCVVLMUL::LMUL_F2;
-  case 8:
+  case 64:
     return RISCVVLMUL::LMUL_1;
-  case 16:
+  case 128:
     return RISCVVLMUL::LMUL_2;
-  case 32:
+  case 256:
     return RISCVVLMUL::LMUL_4;
-  case 64:
+  case 512:
     return RISCVVLMUL::LMUL_8;
   }
 }
@@ -2116,33 +2120,7 @@
     }
   }
 
-  switch (IntNo) {
-  default:
-    return SDValue(); // Don't custom lower most intrinsics.
-  case Intrinsic::riscv_vleff: {
-    SDLoc DL(Op);
-    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other, MVT::Glue);
-    SDValue Load = DAG.getNode(RISCVISD::VLEFF, DL, VTs, Op.getOperand(0),
-                               Op.getOperand(2), Op.getOperand(3));
-    SDValue ReadVL =
-        SDValue(DAG.getMachineNode(RISCV::PseudoReadVL, DL, Op->getValueType(1),
-                                   Load.getValue(2)),
-                0);
-    return DAG.getMergeValues({Load, ReadVL, Load.getValue(1)}, DL);
-  }
-  case Intrinsic::riscv_vleff_mask: {
-    SDLoc DL(Op);
-    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other, MVT::Glue);
-    SDValue Load = DAG.getNode(RISCVISD::VLEFF_MASK, DL, VTs, Op.getOperand(0),
-                               Op.getOperand(2), Op.getOperand(3),
-                               Op.getOperand(4), Op.getOperand(5));
-    SDValue ReadVL =
-        SDValue(DAG.getMachineNode(RISCV::PseudoReadVL, DL, Op->getValueType(1),
-                                   Load.getValue(2)),
-                0);
-    return DAG.getMergeValues({Load, ReadVL, Load.getValue(1)}, DL);
-  }
-  }
+  return SDValue(); // Don't custom lower most intrinsics.
 }
 
 static std::pair<unsigned, unsigned>
@@ -5252,8 +5230,6 @@
     NODE_NAME_CASE(SPLAT_VECTOR_I64)
     NODE_NAME_CASE(READ_VLENB)
     NODE_NAME_CASE(TRUNCATE_VECTOR)
-    NODE_NAME_CASE(VLEFF)
-    NODE_NAME_CASE(VLEFF_MASK)
     NODE_NAME_CASE(VSLIDEUP_VL)
     NODE_NAME_CASE(VSLIDEDOWN_VL)
     NODE_NAME_CASE(VID_VL)
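Note: the getLMUL change above is what lets the table lookups work for mask (i1) vectors. The old byte-based switch divided the known-minimum size by 8 first, so nxv8i1 landed in the one-byte (LMUL_F8) bucket and anything smaller hit the unreachable default. Scaling i1 vectors by 8 before bucketing keeps each mask type in the LMUL bucket its VLE1/VSE1 pseudo was registered under. A standalone check of the new bucketing (local enum; only the arithmetic mirrors the patch):

// Standalone re-statement of the new getLMUL bucketing.
#include <cassert>

enum class LMUL { F8, F4, F2, M1, M2, M4, M8 };

static LMUL getLMUL(unsigned KnownMinBits, bool IsI1Vector) {
  if (IsI1Vector)
    KnownMinBits *= 8; // i1 vectors are scaled up before bucketing.
  switch (KnownMinBits) {
  case 8:   return LMUL::F8;
  case 16:  return LMUL::F4;
  case 32:  return LMUL::F2;
  case 64:  return LMUL::M1;
  case 128: return LMUL::M2;
  case 256: return LMUL::M4;
  case 512: return LMUL::M8;
  default:  assert(false && "Invalid LMUL."); return LMUL::M1;
  }
}

int main() {
  // nxv2i32: 2 * 32 = 64 known-min bits -> LMUL=1, same as before the patch.
  assert(getLMUL(64, /*IsI1Vector=*/false) == LMUL::M1);
  // nxv8i1: 8 known-min bits; the old "/ 8" switch saw one byte (LMUL_F8)
  // here and asserted on anything smaller. Scaling by 8 gives 64 -> LMUL=1,
  // matching the VLMul the PseudoVLE1_V_B8 definition carries.
  assert(getLMUL(8, /*IsI1Vector=*/true) == LMUL::M1);
  return 0;
}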
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -20,20 +20,6 @@
 def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                               SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;
 
-def riscv_vleff : SDNode<"RISCVISD::VLEFF",
-                         SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
-                                              SDTCisVT<2, XLenVT>]>,
-                         [SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
-                          SDNPSideEffect]>;
-def riscv_vleff_mask : SDNode<"RISCVISD::VLEFF_MASK",
-                              SDTypeProfile<1, 4, [SDTCisVec<0>,
-                                                   SDTCisSameAs<0, 1>,
-                                                   SDTCisPtrTy<2>,
-                                                   SDTCVecEltisVT<3, i1>,
-                                                   SDTCisVT<4, XLenVT>]>,
-                              [SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
-                               SDNPSideEffect]>;
-
 // X0 has special meaning for vsetvl/vsetvli.
 //  rd | rs1 |   AVL value | Effect on vl
 //--------------------------------------------------------------
@@ -413,6 +399,39 @@
   let PrimaryKeyName = "getRISCVVIntrinsicInfo";
 }
 
+class RISCVVLE<bit M, bit Str, bit F, bits<7> S, bits<3> L> {
+  bits<1> Masked = M;
+  bits<1> Strided = Str;
+  bits<1> FF = F;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVLETable : GenericTable {
+  let FilterClass = "RISCVVLE";
+  let CppTypeName = "VLEPseudo";
+  let Fields = ["Masked", "Strided", "FF", "SEW", "LMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "Strided", "FF", "SEW", "LMUL"];
+  let PrimaryKeyName = "getVLEPseudo";
+}
+
+class RISCVVSE<bit M, bit Str, bits<7> S, bits<3> L> {
+  bits<1> Masked = M;
+  bits<1> Strided = Str;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVSETable : GenericTable {
+  let FilterClass = "RISCVVSE";
+  let CppTypeName = "VSEPseudo";
+  let Fields = ["Masked", "Strided", "SEW", "LMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "Strided", "SEW", "LMUL"];
+  let PrimaryKeyName = "getVSEPseudo";
+}
+
 class RISCVVLX_VSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
   bits<1> Masked = M;
   bits<1> Ordered = O;
@@ -580,10 +599,11 @@
   let VLMul = m.value;
 }
 
-class VPseudoUSLoadNoMask<VReg RetClass>:
+class VPseudoUSLoadNoMask<VReg RetClass, bits<7> EEW, bit isFF> :
       Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
@@ -595,12 +615,13 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSLoadMask<VReg RetClass>:
+class VPseudoUSLoadMask<VReg RetClass, bits<7> EEW, bit isFF> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPR:$rs1,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
@@ -613,10 +634,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoSLoadNoMask<VReg RetClass>:
+class VPseudoSLoadNoMask<VReg RetClass, bits<7> EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
@@ -628,12 +650,13 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoSLoadMask<VReg RetClass>:
+class VPseudoSLoadMask<VReg RetClass, bits<7> EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPR:$rs1, GPR:$rs2,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
@@ -683,10 +706,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSStoreNoMask<VReg StClass>:
+class VPseudoUSStoreNoMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSE</*Masked*/0, /*Strided*/0, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
@@ -698,10 +722,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSStoreMask<VReg StClass>:
+class VPseudoUSStoreMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSE</*Masked*/1, /*Strided*/0, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
@@ -712,10 +737,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoSStoreNoMask<VReg StClass>:
+class VPseudoSStoreNoMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSE</*Masked*/0, /*Strided*/1, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
@@ -727,10 +753,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoSStoreMask<VReg StClass>:
+class VPseudoSStoreMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, GPR:$rs2,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSE</*Masked*/1, /*Strided*/1, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
@@ -1294,8 +1321,10 @@
     defvar vreg = lmul.vrclass;
     defvar FFStr = !if(isFF, "FF", "");
     let VLMul = lmul.value in {
-      def "E" # eew # FFStr # "_V_" # LInfo : VPseudoUSLoadNoMask<vreg>;
-      def "E" # eew # FFStr # "_V_" # LInfo # "_MASK" : VPseudoUSLoadMask<vreg>;
+      def "E" # eew # FFStr # "_V_" # LInfo :
+        VPseudoUSLoadNoMask<vreg, eew, isFF>;
+      def "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
+        VPseudoUSLoadMask<vreg, eew, isFF>;
     }
   }
 }
@@ -1304,7 +1333,7 @@
 multiclass VPseudoLoadMask {
   foreach mti = AllMasks in {
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR>;
+      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>;
     }
   }
 }
@@ -1315,8 +1344,8 @@
     defvar LInfo = lmul.MX;
     defvar vreg = lmul.vrclass;
     let VLMul = lmul.value in {
-      def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg>;
-      def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg>;
+      def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>;
+      def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>;
     }
   }
 }
@@ -1353,8 +1382,8 @@
     defvar LInfo = lmul.MX;
     defvar vreg = lmul.vrclass;
     let VLMul = lmul.value in {
-      def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg>;
-      def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg>;
+      def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>;
+      def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>;
     }
   }
 }
@@ -1363,7 +1392,7 @@
 multiclass VPseudoStoreMask {
   foreach mti = AllMasks in {
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR>;
+      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
     }
   }
 }
@@ -1374,8 +1403,8 @@
     defvar LInfo = lmul.MX;
     defvar vreg = lmul.vrclass;
     let VLMul = lmul.value in {
-      def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg>;
-      def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg>;
+      def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>;
+      def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>;
     }
   }
 }
@@ -2295,99 +2324,6 @@
                       $rs1, $vs2, $vd,
                       (mask_type V0), GPR:$vl, sew)>;
 
-multiclass VPatUSLoad<string intrinsic,
-                      string inst,
-                      LLVMType type,
-                      LLVMType mask_type,
-                      int sew,
-                      LMULInfo vlmul,
-                      VReg reg_class>
-{
-  defvar Intr = !cast<Intrinsic>(intrinsic);
-  defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
-  def : Pat<(type (Intr GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
-            (Pseudo $rs1, GPR:$vl, sew)>;
-  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
-  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
-  def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
-                            GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
-            (PseudoMask $merge,
-                        $rs1, (mask_type V0), GPR:$vl, sew)>;
-}
-
-multiclass VPatUSLoadFF<string inst,
-                        LLVMType type,
-                        LLVMType mask_type,
-                        int sew,
-                        LMULInfo vlmul,
-                        VReg reg_class>
-{
-  defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
-  def : Pat<(type (riscv_vleff GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
-            (Pseudo $rs1, GPR:$vl, sew)>;
-  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
-  def : Pat<(type (riscv_vleff_mask (type GetVRegNoV0<reg_class>.R:$merge),
-                                    GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
-            (PseudoMask $merge,
-                        $rs1, (mask_type V0), GPR:$vl, sew)>;
-}
-
-multiclass VPatSLoad<string intrinsic,
-                     string inst,
-                     LLVMType type,
-                     LLVMType mask_type,
-                     int sew,
-                     LMULInfo vlmul,
-                     VReg reg_class>
-{
-  defvar Intr = !cast<Intrinsic>(intrinsic);
-  defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
-  def : Pat<(type (Intr GPR:$rs1, GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
-            (Pseudo $rs1, $rs2, GPR:$vl, sew)>;
-  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
-  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
-  def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
-                            GPR:$rs1, GPR:$rs2, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
-            (PseudoMask $merge,
-                        $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
-}
-
-multiclass VPatUSStore<string intrinsic,
-                       string inst,
-                       LLVMType type,
-                       LLVMType mask_type,
-                       int sew,
-                       LMULInfo vlmul,
-                       VReg reg_class>
-{
-  defvar Intr = !cast<Intrinsic>(intrinsic);
-  defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
-  def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, (XLenVT (VLOp GPR:$vl))),
-            (Pseudo $rs3, $rs1, GPR:$vl, sew)>;
-  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
-  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
-  def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl))),
-            (PseudoMask $rs3, $rs1, (mask_type V0), GPR:$vl, sew)>;
-}
-
-multiclass VPatSStore<string intrinsic,
-                      string inst,
-                      LLVMType type,
-                      LLVMType mask_type,
-                      int sew,
-                      LMULInfo vlmul,
-                      VReg reg_class>
-{
-  defvar Intr = !cast<Intrinsic>(intrinsic);
-  defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
-  def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (XLenVT (VLOp GPR:$vl))),
-            (Pseudo $rs3, $rs1, $rs2, GPR:$vl, sew)>;
-  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
-  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
-  def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (mask_type V0), (XLenVT (VLOp GPR:$vl))),
-            (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
-}
-
 multiclass VPatUnaryS_M<string intrinsic_name,
                         string inst>
 {
@@ -3809,52 +3745,6 @@
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtV] in {
-
-//===----------------------------------------------------------------------===//
-// 7. Vector Loads and Stores
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// 7.4 Vector Unit-Stride Instructions
-//===----------------------------------------------------------------------===//
-
-foreach vti = AllVectors in
-{
-  defm : VPatUSLoad<"int_riscv_vle",
-                    "PseudoVLE" # vti.SEW,
-                    vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
-  defm : VPatUSLoadFF<"PseudoVLE" # vti.SEW # "FF",
-                      vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
-  defm : VPatUSStore<"int_riscv_vse",
-                     "PseudoVSE" # vti.SEW,
-                     vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
-}
-
-foreach vti = AllMasks in {
-  defvar PseudoVLE1 = !cast<Instruction>("PseudoVLE1_V_"#vti.BX);
-  def : Pat<(vti.Mask (int_riscv_vle1 GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
-            (PseudoVLE1 $rs1, GPR:$vl, vti.SEW)>;
-  defvar PseudoVSE1 = !cast<Instruction>("PseudoVSE1_V_"#vti.BX);
-  def : Pat<(int_riscv_vse1 (vti.Mask VR:$rs3), GPR:$rs1, (XLenVT (VLOp GPR:$vl))),
-            (PseudoVSE1 $rs3, $rs1, GPR:$vl, vti.SEW)>;
-}
-
-//===----------------------------------------------------------------------===//
-// 7.5 Vector Strided Instructions
-//===----------------------------------------------------------------------===//
-
-foreach vti = AllVectors in
-{
-  defm : VPatSLoad<"int_riscv_vlse",
-                   "PseudoVLSE" # vti.SEW,
-                   vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
-  defm : VPatSStore<"int_riscv_vsse",
-                    "PseudoVSSE" # vti.SEW,
-                    vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
-}
-
-} // Predicates = [HasStdExtV]
 
 //===----------------------------------------------------------------------===//
 // 8. Vector AMO Operations
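Note: the removed VPat* multiclasses and foreach loops are fully superseded by the custom isel above; no selection patterns remain for these intrinsics. One subtlety carries over for vle1/vse1: the table lookup is keyed on the i1 element width (getVLEPseudo is called with ScalarSize, so the PseudoVLE1/PseudoVSE1 rows are presumably registered with EEW 1), while the SEW immediate placed on the instruction stays 8, per the "VLE1 uses an SEW of 8" comments. A minimal sketch of that split, with assumed values:

// Sketch of the vle1 keying split: table key vs. instruction SEW operand.
#include <cstdio>

int main() {
  const bool IsVLE1 = true;
  const unsigned ScalarSize = 1;                   // i1 elements.
  const unsigned SEWImm = IsVLE1 ? 8 : ScalarSize; // Operand on the pseudo.
  // getVLEPseudo is still keyed on ScalarSize, so the VLE1 table rows must
  // carry EEW 1 to be found, even though the encoded SEW operand is 8.
  std::printf("table key SEW=%u, instruction SEW immediate=%u\n",
              ScalarSize, SEWImm);
  return 0;
}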