diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -470,6 +470,24 @@
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // For unit stride segment load
+  // Input: (pointer, vl)
+  class RISCVUSSegLoad<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    [LLVMPointerToElt<0>, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For unit stride segment load with mask
+  // Input: (maskedoff, pointer, mask, vl)
+  class RISCVUSSegLoadMask<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                                [LLVMPointerToElt<0>,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 llvm_anyint_ty]),
+                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -568,6 +586,10 @@
     def "int_riscv_" #NAME :RISCVConversionNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
   }
+  multiclass RISCVUSSegLoad<int nf> {
+    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -849,4 +871,8 @@
                      llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
 
+  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
+    defm vlseg # nf : RISCVUSSegLoad<nf>;
+  }
+
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -55,6 +55,9 @@
   bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
   bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
 
+  void selectVLSEG(SDNode *Node, unsigned IntNo);
+  void selectVLSEGMask(SDNode *Node, unsigned IntNo);
+
   // Include the pieces autogenerated from the target description.
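  // Note (illustrative summary, based on the declarations added above):
  // selectVLSEG and selectVLSEGMask custom-select the riscv_vlseg<nf> and
  // riscv_vlseg<nf>_mask intrinsics from Select(); the pseudo instruction to
  // emit is looked up in the generated RISCVZvlsseg searchable table rather
  // than matched through the patterns in RISCVGenDAGISel.inc included below.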
#include "RISCVGenDAGISel.inc" diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -49,6 +49,161 @@ return Result; } +static RISCVVLMUL getLMUL(EVT VT) { + switch (VT.getSizeInBits().getKnownMinValue() / 8) { + default: + llvm_unreachable("Invalid LMUL."); + case 1: + return RISCVVLMUL::LMUL_F8; + case 2: + return RISCVVLMUL::LMUL_F4; + case 4: + return RISCVVLMUL::LMUL_F2; + case 8: + return RISCVVLMUL::LMUL_1; + case 16: + return RISCVVLMUL::LMUL_2; + case 32: + return RISCVVLMUL::LMUL_4; + case 64: + return RISCVVLMUL::LMUL_8; + } +} + +static unsigned getSubregIndexByEVT(EVT VT, unsigned Index) { + RISCVVLMUL LMUL = getLMUL(VT); + if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 || + LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) { + static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7, + "Unexpected subreg numbering"); + return RISCV::sub_vrm1_0 + Index; + } else if (LMUL == RISCVVLMUL::LMUL_2) { + static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3, + "Unexpected subreg numbering"); + return RISCV::sub_vrm2_0 + Index; + } else if (LMUL == RISCVVLMUL::LMUL_4) { + static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1, + "Unexpected subreg numbering"); + return RISCV::sub_vrm4_0 + Index; + } + llvm_unreachable("Invalid vector type."); +} + +static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef Regs, + unsigned RegClassID, unsigned SubReg0) { + assert(Regs.size() >= 2 && Regs.size() <= 8); + + SDLoc DL(Regs[0]); + SmallVector Ops; + + Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32)); + + for (unsigned I = 0; I < Regs.size(); ++I) { + Ops.push_back(Regs[I]); + Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32)); + } + SDNode *N = + CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops); + return SDValue(N, 0); +} + +static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef Regs, + unsigned NF) { + static const unsigned RegClassIDs[] = { + RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID, + RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID, + RISCV::VRN8M1RegClassID}; + + return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0); +} + +static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef Regs, + unsigned NF) { + static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID, + RISCV::VRN3M2RegClassID, + RISCV::VRN4M2RegClassID}; + + return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0); +} + +static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef Regs, + unsigned NF) { + return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID, + RISCV::sub_vrm4_0); +} + +static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef Regs, + unsigned NF, RISCVVLMUL LMUL) { + switch (LMUL) { + default: + llvm_unreachable("Invalid LMUL."); + case RISCVVLMUL::LMUL_F8: + case RISCVVLMUL::LMUL_F4: + case RISCVVLMUL::LMUL_F2: + case RISCVVLMUL::LMUL_1: + return createM1Tuple(CurDAG, Regs, NF); + case RISCVVLMUL::LMUL_2: + return createM2Tuple(CurDAG, Regs, NF); + case RISCVVLMUL::LMUL_4: + return createM4Tuple(CurDAG, Regs, NF); + } +} + +void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo) { + SDLoc DL(Node); + unsigned NF = Node->getNumValues() - 1; + EVT VT = Node->getValueType(0); + unsigned ScalarSize = VT.getScalarSizeInBits(); + MVT XLenVT = 
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SDValue Operands[] = {Node->getOperand(2), // Base pointer.
+                        Node->getOperand(3), // VL.
+                        SEW, Node->getOperand(0)}; // Chain
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+  SDNode *Load =
+      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I)
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+                                               VT, SuperReg));
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+  CurDAG->RemoveDeadNode(Node);
+}
+
+void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumValues() - 1;
+  EVT VT = Node->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {MaskedOff,
+                        Node->getOperand(NF + 2), // Base pointer.
+                        Node->getOperand(NF + 3), // Mask.
+                        Node->getOperand(NF + 4), // VL.
+                        SEW,
+                        Node->getOperand(0)}; // Chain.
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+  SDNode *Load =
+      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I)
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+                                               VT, SuperReg));
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+  CurDAG->RemoveDeadNode(Node);
+}
+
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
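  // Illustrative walk-through (using names defined elsewhere in this patch):
  // a call such as
  //   {<vscale x 1 x i8>, <vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl)
  // reaches Select() as an ISD::INTRINSIC_W_CHAIN node.  selectVLSEG() reads
  // SEW = 8 and LMUL = 1/8 from the result type, looks up PseudoVLSEG2E8_V_MF8
  // in the generated RISCVZvlsseg table, and rewrites each result of the
  // intrinsic as an extract of sub_vrm1_0 / sub_vrm1_1 from the register tuple
  // produced by the pseudo.  The masked variants go through selectVLSEGMask(),
  // which additionally builds the merge operand with createTuple().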
if (Node->isMachineOpcode()) { @@ -171,6 +326,26 @@ /* Chain */ Node->getOperand(0))); return; } + case Intrinsic::riscv_vlseg2: + case Intrinsic::riscv_vlseg3: + case Intrinsic::riscv_vlseg4: + case Intrinsic::riscv_vlseg5: + case Intrinsic::riscv_vlseg6: + case Intrinsic::riscv_vlseg7: + case Intrinsic::riscv_vlseg8: { + selectVLSEG(Node, IntNo); + return; + } + case Intrinsic::riscv_vlseg2_mask: + case Intrinsic::riscv_vlseg3_mask: + case Intrinsic::riscv_vlseg4_mask: + case Intrinsic::riscv_vlseg5_mask: + case Intrinsic::riscv_vlseg6_mask: + case Intrinsic::riscv_vlseg7_mask: + case Intrinsic::riscv_vlseg8_mask: { + selectVLSEGMask(Node, IntNo); + return; + } } break; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -315,6 +315,22 @@ #include "RISCVGenSearchableTables.inc" } // end namespace RISCVVIntrinsicsTable + +namespace RISCVZvlssegTable { + +struct RISCVZvlsseg { + unsigned int IntrinsicID; + unsigned int SEW; + unsigned int LMUL; + unsigned int Pseudo; +}; + +using namespace RISCV; + +#define GET_RISCVZvlssegTable_DECL +#include "RISCVGenSearchableTables.inc" + +} // namespace RISCVZvlssegTable } #endif diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -4163,4 +4163,11 @@ #include "RISCVGenSearchableTables.inc" } // namespace RISCVVIntrinsicsTable + +namespace RISCVZvlssegTable { + +#define GET_RISCVZvlssegTable_IMPL +#include "RISCVGenSearchableTables.inc" + +} // namespace RISCVZvlssegTable } // namespace llvm diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -73,6 +73,20 @@ list m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8]; } +class MxSet { + list m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8], + !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8], + !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8], + !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]); +} + +class NFSet { + list L = !cond(!eq(m.value, V_M8.value): [], + !eq(m.value, V_M4.value): [2], + !eq(m.value, V_M2.value): [2, 3, 4], + true: [2, 3, 4, 5, 6, 7, 8]); +} + class shift_amount { int val = !if(!eq(num, 1), 0, !add(1, shift_amount.val)); } @@ -95,6 +109,13 @@ !dag(type, [operand], [name])); } +class SegRegClass { + VReg RC = !cast("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX, + !eq(m.value, V_MF4.value): V_M1.MX, + !eq(m.value, V_MF2.value): V_M1.MX, + true: m.MX)); +} + //===----------------------------------------------------------------------===// // Vector register and vector group type information. 
//===----------------------------------------------------------------------===// @@ -351,6 +372,20 @@ let PrimaryKeyName = "getRISCVVIntrinsicInfo"; } +class RISCVZvlsseg S, bits<3> L> { + Intrinsic IntrinsicID = !cast(IntrName); + bits<11> SEW = S; + bits<3> LMUL = L; + Pseudo Pseudo = !cast(NAME); +} + +def RISCVZvlssegTable : GenericTable { + let FilterClass = "RISCVZvlsseg"; + let Fields = ["IntrinsicID", "SEW", "LMUL", "Pseudo"]; + let PrimaryKey = ["IntrinsicID", "SEW", "LMUL"]; + let PrimaryKeyName = "getPseudo"; +} + //===----------------------------------------------------------------------===// // Helpers to define the different pseudo instructions. //===----------------------------------------------------------------------===// @@ -374,6 +409,23 @@ !subst("Pseudo", "", PseudoInst)))))))))))))))); } +class ToLowerCase { + string L = !subst("VLSEG", "vlseg", Upper); +} + +// Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2 +// Example: PseudoVLSEG2E32_V_M2_MASK -> int_riscv_vlseg2_mask +class PseudoToIntrinsic { + string Intrinsic = !strconcat("int_riscv_", + ToLowerCase< + !subst("E8", "", + !subst("E16", "", + !subst("E32", "", + !subst("E64", "", + !subst("_V", "", PseudoToVInst.VInst)))))>.L, + !if(IsMasked, "_mask", "")); +} + // The destination vector register group for a masked vector instruction cannot // overlap the source mask register (v0), unless the destination vector register // is being written with a mask value (e.g., comparisons) or the scalar result @@ -826,6 +878,40 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoUSSegLoadNoMask EEW>: + Pseudo<(outs RetClass:$rd), + (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVZvlsseg.Intrinsic, EEW, VLMul> { + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; + let Uses = [VL, VTYPE]; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoUSSegLoadMask EEW>: + Pseudo<(outs GetVRegNoV0.R:$rd), + (ins GetVRegNoV0.R:$merge, GPR:$rs1, + VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVZvlsseg.Intrinsic, EEW, VLMul> { + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; + let Constraints = "$rd = $merge"; + let Uses = [VL, VTYPE]; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + multiclass VPseudoUSLoad { foreach lmul = MxList.m in { defvar LInfo = lmul.MX; @@ -1334,6 +1420,21 @@ defm _W : VPseudoConversion; } +multiclass VPseudoUSSegLoad { + foreach eew = EEWList in { + foreach lmul = MxSet.m in { + defvar LInfo = lmul.MX; + let VLMul = lmul.value in { + foreach nf = NFSet.L in { + defvar vreg = SegRegClass.RC; + def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegLoadNoMask; + def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegLoadMask; + } + } + } + } +} + //===----------------------------------------------------------------------===// // Helpers to define the intrinsic patterns. //===----------------------------------------------------------------------===// @@ -2460,6 +2561,15 @@ } //===----------------------------------------------------------------------===// +// 7.8. 
Vector Load/Store Segment Instructions +//===----------------------------------------------------------------------===// +defm PseudoVLSEG : VPseudoUSSegLoad; + +//===----------------------------------------------------------------------===// +// Pseudo Instructions +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// // 12. Vector Integer Arithmetic Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp --- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp +++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp @@ -176,7 +176,7 @@ if (RISCV::VRM2RegClass.contains(Reg) || RISCV::VRM4RegClass.contains(Reg) || RISCV::VRM8RegClass.contains(Reg)) { - Reg = TRI->getSubReg(Reg, RISCV::sub_vrm2); + Reg = TRI->getSubReg(Reg, RISCV::sub_vrm1_0); assert(Reg && "Subregister does not exist"); } diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -51,12 +51,21 @@ def ABIRegAltName : RegAltNameIndex; -def sub_vrm2 : SubRegIndex<64, -1>; -def sub_vrm2_hi : SubRegIndex<64, -1>; -def sub_vrm4 : SubRegIndex<128, -1>; -def sub_vrm4_hi : SubRegIndex<128, -1>; -def sub_vrm8 : SubRegIndex<256, -1>; -def sub_vrm8_hi : SubRegIndex<256, -1>; +def sub_vrm1_0 : SubRegIndex<64, -1>; +def sub_vrm1_1 : SubRegIndex<64, -1>; +def sub_vrm1_2 : SubRegIndex<64, -1>; +def sub_vrm1_3 : SubRegIndex<64, -1>; +def sub_vrm1_4 : SubRegIndex<64, -1>; +def sub_vrm1_5 : SubRegIndex<64, -1>; +def sub_vrm1_6 : SubRegIndex<64, -1>; +def sub_vrm1_7 : SubRegIndex<64, -1>; +def sub_vrm2_0 : SubRegIndex<128, -1>; +def sub_vrm2_1 : SubRegIndex<128, -1>; +def sub_vrm2_2 : SubRegIndex<128, -1>; +def sub_vrm2_3 : SubRegIndex<128, -1>; +def sub_vrm4_0 : SubRegIndex<256, -1>; +def sub_vrm4_1 : SubRegIndex<256, -1>; + } // Namespace = "RISCV" // Integer registers @@ -340,6 +349,65 @@ defvar vbool32_t = nxv2i1; defvar vbool64_t = nxv1i1; +// There is no need to define register classes for fractional LMUL. +def LMULList { + list m = [1, 2, 4, 8]; +} + +//===----------------------------------------------------------------------===// +// Utility classes for segment load/store. +//===----------------------------------------------------------------------===// +// The set of legal NF for LMUL = lmul. +// LMUL == 1, NF = 2, 3, 4, 5, 6, 7, 8 +// LMUL == 2, NF = 2, 3, 4 +// LMUL == 4, NF = 2 +class NFList { + list L = !cond(!eq(lmul, 1): [2, 3, 4, 5, 6, 7, 8], + !eq(lmul, 2): [2, 3, 4], + !eq(lmul, 4): [2], + !eq(lmul, 8): []); +} + +// Generate [start, end) SubRegIndex list. 
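// Illustrative expansion of the tuple machinery defined below (worked out by
// hand from these definitions, not exhaustive): for nf = 3 and lmul = 2,
// SubRegSet yields [sub_vrm2_0, sub_vrm2_1, sub_vrm2_2] and VRegList
// enumerates every overlapping group of three LMUL=2 register groups, so the
// VN3M2 tuples are
//   (V0M2, V2M2, V4M2), (V2M2, V4M2, V6M2), ..., (V26M2, V28M2, V30M2).
// These RegisterTuples back the VRN<nf>M<lmul> register classes that the
// vlseg pseudo instructions use for their tuple operands and results.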
+class SubRegSet LIn, int start, int nf, int lmul> { + list L = !foldl([], + [0, 1, 2, 3, 4, 5, 6, 7], + AccList, i, + !listconcat(AccList, + !if(!lt(i, nf), + [!cast("sub_vrm" # lmul # "_" # i)], + []))); +} + +class IndexSet { + list R = + !foldl([], + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31], + L, i, + !listconcat(L, + !if(!and( + !le(!mul(index, lmul), !mul(i, lmul)), + !le(!mul(i, lmul), + !sub(!add(32, !mul(index, lmul)), !mul(nf, lmul))) + ), [!mul(i, lmul)], []))); +} + +class VRegList LIn, int start, int nf, int lmul> { + list L = + !if(!ge(start, nf), + LIn, + !listconcat( + [!dag(add, + !foreach(i, IndexSet.R, + !cast("V" # i # !cond(!eq(lmul, 2): "M2", + !eq(lmul, 4): "M4", + true: ""))), + !listsplat("", !size(IndexSet.R)))], + VRegList.L)); +} + // Vector registers let RegAltNameIndices = [ABIRegAltName] in { foreach Index = 0-31 in { @@ -353,7 +421,7 @@ !cast("V"#!add(Index, 1))], ["v"#Index]>, DwarfRegAlias("V"#Index)> { - let SubRegIndices = [sub_vrm2, sub_vrm2_hi]; + let SubRegIndices = [sub_vrm1_0, sub_vrm1_1]; } } @@ -363,7 +431,7 @@ !cast("V"#!add(Index, 2)#"M2")], ["v"#Index]>, DwarfRegAlias("V"#Index)> { - let SubRegIndices = [sub_vrm4, sub_vrm4_hi]; + let SubRegIndices = [sub_vrm2_0, sub_vrm2_1]; } } @@ -373,7 +441,7 @@ !cast("V"#!add(Index, 4)#"M4")], ["v"#Index]>, DwarfRegAlias("V"#Index)> { - let SubRegIndices = [sub_vrm8, sub_vrm8_hi]; + let SubRegIndices = [sub_vrm4_0, sub_vrm4_1]; } } @@ -383,6 +451,13 @@ def VXRM : RISCVReg<0, "vxrm", ["vxrm"]>; } +foreach m = [1, 2, 4] in { + foreach n = NFList.L in { + def "VN" # n # "M" # m: RegisterTuples.L, + VRegList<[], 0, n, m>.L>; + } +} + class VReg regTypes, dag regList, int Vlmul> : RegisterClass<"RISCV", regTypes, @@ -446,3 +521,11 @@ def VMV0 : RegisterClass<"RISCV", VMaskVTs, 64, (add V0)> { let Size = 64; } + +foreach m = LMULList.m in { + foreach nf = NFList.L in { + def "VRN" # nf # "M" # m : VReg<[untyped], + (add !cast("VN" # nf # "M" # m)), + !mul(nf, m)>; + } +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll @@ -0,0 +1,4723 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i32) + +define @test_vlseg2_nxv16i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv16i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} 
@llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i32) + +define @test_vlseg2_nxv1i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i32) + +define @test_vlseg3_nxv1i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i32) + +define @test_vlseg4_nxv1i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl) + %1 = 
extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i32) + +define @test_vlseg5_nxv1i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i32) + +define @test_vlseg6_nxv1i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i32) + +define @test_vlseg7_nxv1i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, 
e8,mf8,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8_nxv1i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv16i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i32) + +define @test_vlseg2_nxv16i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg2e8.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv16i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg2e8.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg2e8.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv16i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i32) + +define @test_vlseg3_nxv16i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv16i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg3e8.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv16i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg3e8.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg3e8.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i32) + +define @test_vlseg4_nxv16i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg4e8.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv16i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg4e8.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg4e8.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i32) + +define @test_vlseg2_nxv2i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2i32(i32* , i32) +declare {,,} 
@llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i32) + +define @test_vlseg3_nxv2i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i32) + +define @test_vlseg4_nxv2i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i32) + +define @test_vlseg5_nxv2i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* 
%base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i32) + +define @test_vlseg6_nxv2i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i32) + +define @test_vlseg7_nxv2i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i32) + +define @test_vlseg8_nxv2i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue 
{,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i32) + +define @test_vlseg2_nxv4i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i32) + +define @test_vlseg3_nxv4i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i32) + +define @test_vlseg4_nxv4i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i32) + +define @test_vlseg5_nxv4i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv4i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i32) + +define @test_vlseg6_nxv4i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv4i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* 
%base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i32) + +define @test_vlseg7_nxv4i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv4i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i32) + +define @test_vlseg8_nxv4i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv4i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i32) + +define @test_vlseg2_nxv1i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1i32(i32* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i32) + +define @test_vlseg3_nxv1i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i32) + +define @test_vlseg4_nxv1i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i32) + +define @test_vlseg5_nxv1i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i32) + +define @test_vlseg6_nxv1i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i32) + +define @test_vlseg7_nxv1i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i32) + +define @test_vlseg8_nxv1i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i32) + +define @test_vlseg2_nxv8i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv8i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i32) + +define @test_vlseg3_nxv8i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv8i16(i16* %base, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i32) + +define @test_vlseg4_nxv8i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv8i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i32) + +define @test_vlseg2_nxv8i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv8i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i32) + +define @test_vlseg3_nxv8i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, 
i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv8i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i32) + +define @test_vlseg4_nxv8i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv8i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i32) + +define @test_vlseg5_nxv8i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv8i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i32) + +define @test_vlseg6_nxv8i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv8i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i32) + +define @test_vlseg7_nxv8i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv8i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8_nxv8i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv8i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; 
CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i32) + +define @test_vlseg2_nxv8i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i32) + +define @test_vlseg2_nxv4i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i32) + +define @test_vlseg3_nxv4i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i32) + +define @test_vlseg4_nxv4i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i32) + +define @test_vlseg5_nxv4i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv4i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i32) + +define @test_vlseg6_nxv4i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv4i8(i8* %base, 
i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i32) + +define @test_vlseg7_nxv4i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv4i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8_nxv4i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv4i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, 
%1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i32) + +define @test_vlseg2_nxv1i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i32) + +define @test_vlseg3_nxv1i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i32) + +define @test_vlseg4_nxv1i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i32) + +define @test_vlseg5_nxv1i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i32) + +define @test_vlseg6_nxv1i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i32) + +define @test_vlseg7_nxv1i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1i16(i16* %base, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i32) + +define @test_vlseg8_nxv1i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv32i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i32) + +define @test_vlseg2_nxv32i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vlseg2e8.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv32i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu +; CHECK-NEXT: vlseg2e8.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vlseg2e8.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2i8(i8* , i32) +declare {,} 
@llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i32) + +define @test_vlseg2_nxv2i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i32) + +define @test_vlseg3_nxv2i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i32) + +define @test_vlseg4_nxv2i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare 
{,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i32) + +define @test_vlseg5_nxv2i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i32) + +define @test_vlseg6_nxv2i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i32) + +define @test_vlseg7_nxv2i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; 
CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8_nxv2i8(i8* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2i8(i8* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, , i32) + +define @test_vlseg2_nxv2i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i32) + +define @test_vlseg3_nxv2i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: 
ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i32) + +define @test_vlseg4_nxv2i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i32) + +define @test_vlseg5_nxv2i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , 
i32) + +define @test_vlseg6_nxv2i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i32) + +define @test_vlseg7_nxv2i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i32) + +define @test_vlseg8_nxv2i16(i16* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2i16(i16* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; 
CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i32) + +define @test_vlseg2_nxv4i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i32) + +define @test_vlseg3_nxv4i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i32) + +define @test_vlseg4_nxv4i32(i32* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 
%vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4i32(i32* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv16f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i32) + +define @test_vlseg2_nxv16f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv16f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4f64(double* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i32) + +define @test_vlseg2_nxv4f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1f64(double* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i32) + +define @test_vlseg2_nxv1f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0) +; CHECK-NEXT: # kill: 
def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1f64(double* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i32) + +define @test_vlseg3_nxv1f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1f64(double* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i32) + +define @test_vlseg4_nxv1f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i32) + +define @test_vlseg5_nxv1f64(double* %base, 
i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i32) + +define @test_vlseg6_nxv1f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i32) + +define @test_vlseg7_nxv1f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i32) + +define @test_vlseg8_nxv1f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i32) + +define @test_vlseg2_nxv2f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2f32(float* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i32) + +define @test_vlseg3_nxv2f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2f32(float* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i32) + +define @test_vlseg4_nxv2f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i32) + +define @test_vlseg5_nxv2f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* , i32) +declare {,,,,,} 
@llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i32) + +define @test_vlseg6_nxv2f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i32) + +define @test_vlseg7_nxv2f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i32) + +define @test_vlseg8_nxv2f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, 
(a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, , i32) + +define @test_vlseg2_nxv1f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i32) + +define @test_vlseg3_nxv1f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i32) + +define @test_vlseg4_nxv1f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i32) + +define @test_vlseg5_nxv1f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i32) + +define @test_vlseg6_nxv1f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} 
%2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i32) + +define @test_vlseg7_nxv1f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i32) + +define @test_vlseg8_nxv1f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i32) + +define @test_vlseg2_nxv1f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1f32(float* %base, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vlseg2_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1f32(float* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i32) + +define @test_vlseg3_nxv1f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1f32(float* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i32) + +define @test_vlseg4_nxv1f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i32) + +define @test_vlseg5_nxv1f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i32) + +define @test_vlseg6_nxv1f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i32) + +define @test_vlseg7_nxv1f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl) + 
%1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i32) + +define @test_vlseg8_nxv1f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i32) + +define @test_vlseg2_nxv8f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv8f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, half*, , i32) + +define @test_vlseg3_nxv8f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv8f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8f16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv8f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i32) + +define @test_vlseg4_nxv8f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv8f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i32) + +define @test_vlseg2_nxv8f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2f64(double* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i32) + +define @test_vlseg2_nxv2f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2f64(double* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i32) + +define @test_vlseg3_nxv2f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg3e64.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2f64(double* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, double*, , i32) + +define @test_vlseg4_nxv2f64(double* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg4e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2f64(double* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg4e64.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg4e64.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, half*, , i32) + +define @test_vlseg2_nxv4f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, half*, , i32) + +define @test_vlseg3_nxv4f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i32) + +define @test_vlseg4_nxv4f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* , i32) +declare {,,,,} 
@llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i32) + +define @test_vlseg5_nxv4f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv4f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i32) + +define @test_vlseg6_nxv4f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv4f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, half*, , i32) + +define @test_vlseg7_nxv4f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv4f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 
+; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i32) + +define @test_vlseg8_nxv4f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv4f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, half*, , i32) + +define @test_vlseg2_nxv2f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, half*, , i32) + +define @test_vlseg3_nxv2f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 
killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, half*, , i32) + +define @test_vlseg4_nxv2f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, half*, , i32) + +define @test_vlseg5_nxv2f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* , i32) 
+declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, half*, , i32) + +define @test_vlseg6_nxv2f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, half*, , i32) + +define @test_vlseg7_nxv2f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, half*, , i32) + +define @test_vlseg8_nxv2f16(half* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2f16(half* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16.v 
v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, float*, , i32) + +define @test_vlseg2_nxv4f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4f32(float* , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, float*, , i32) + +define @test_vlseg3_nxv4f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4f32(float* , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, float*, , i32) + +define @test_vlseg4_nxv4f32(float* %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed 
$v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4f32(float* %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -0,0 +1,5121 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i64) + +define @test_vlseg2_nxv16i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv16i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i64) + +define @test_vlseg2_nxv4i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl) 
+ %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i64) + +define @test_vlseg3_nxv4i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i64) + +define @test_vlseg4_nxv4i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv16i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i64) + +define @test_vlseg2_nxv16i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg2e8.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv16i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg2e8.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg2e8.v v14, (a0), v0.t +; 
CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv16i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i64) + +define @test_vlseg3_nxv16i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg3e8.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv16i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg3e8.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg3e8.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i64) + +define @test_vlseg4_nxv16i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg4e8.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv16i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg4e8.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg4e8.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1i64(i64* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i64(,, i64*, , i64) + +define @test_vlseg2_nxv1i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64.v 
v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i64( %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1i64(i64* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i64(,,, i64*, , i64) + +define @test_vlseg3_nxv1i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i64( %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i64(,,,, i64*, , i64) + +define @test_vlseg4_nxv1i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64(,,,,, i64*, , i64) + +define @test_vlseg5_nxv1i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1i64(i64* %base, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlseg5_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64(,,,,,, i64*, , i64) + +define @test_vlseg6_nxv1i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64(,,,,,,, i64*, , i64) + +define @test_vlseg7_nxv1i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} 
@llvm.riscv.vlseg8.nxv1i64(i64* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64(,,,,,,,, i64*, , i64) + +define @test_vlseg8_nxv1i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i64) + +define @test_vlseg2_nxv1i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1i32(i32* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i64) + +define @test_vlseg3_nxv1i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def 
$v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i64) + +define @test_vlseg4_nxv1i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i64) + +define @test_vlseg5_nxv1i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i64) + +define @test_vlseg6_nxv1i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlseg6_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i64) + +define @test_vlseg7_nxv1i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i64) + +define @test_vlseg8_nxv1i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i64) + +define @test_vlseg2_nxv8i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv8i16(i16* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i64) + +define @test_vlseg3_nxv8i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv8i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i64) + +define @test_vlseg4_nxv8i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv8i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 
killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i64) + +define @test_vlseg2_nxv4i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i64) + +define @test_vlseg3_nxv4i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i64) + +define @test_vlseg4_nxv4i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg4e8.v 
v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i64) + +define @test_vlseg5_nxv4i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv4i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i64) + +define @test_vlseg6_nxv4i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv4i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i64) + +define @test_vlseg7_nxv4i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 
1 + ret %1 +} + +define @test_vlseg7_mask_nxv4i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8_nxv4i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv4i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i64) + +define @test_vlseg2_nxv1i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} 
@llvm.riscv.vlseg3.nxv1i16(i16* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i64) + +define @test_vlseg3_nxv1i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i64) + +define @test_vlseg4_nxv1i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i64) + +define @test_vlseg5_nxv1i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i64) + +define @test_vlseg6_nxv1i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i64) + +define @test_vlseg7_nxv1i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i64) + +define @test_vlseg8_nxv1i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i64) + +define @test_vlseg2_nxv2i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2i32(i32* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i64) + +define @test_vlseg3_nxv2i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i64) + +define 
@test_vlseg4_nxv2i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i64) + +define @test_vlseg5_nxv2i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i64) + +define @test_vlseg6_nxv2i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i64) + +define @test_vlseg7_nxv2i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i64) + +define @test_vlseg8_nxv2i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i64) + +define @test_vlseg2_nxv8i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # 
kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv8i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i64) + +define @test_vlseg3_nxv8i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv8i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i64) + +define @test_vlseg4_nxv8i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv8i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i64) + +define @test_vlseg5_nxv8i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; 
CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv8i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i64) + +define @test_vlseg6_nxv8i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv8i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i64) + +define @test_vlseg7_nxv8i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv8i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} 
@llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8_nxv8i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv8i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4i64(i64* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i64(,, i64*, , i64) + +define @test_vlseg2_nxv4i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i64( %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i64) + +define @test_vlseg2_nxv4i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv4i16(i16* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i64) + +define @test_vlseg3_nxv4i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i64) + +define @test_vlseg4_nxv4i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i64) + +define @test_vlseg5_nxv4i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define 
@test_vlseg5_mask_nxv4i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i64) + +define @test_vlseg6_nxv4i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv4i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i64) + +define @test_vlseg7_nxv4i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv4i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = 
extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i64) + +define @test_vlseg8_nxv4i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv4i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i64) + +define @test_vlseg2_nxv1i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i64) + +define @test_vlseg3_nxv1i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; 
CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i64) + +define @test_vlseg4_nxv1i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i64) + +define @test_vlseg5_nxv1i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i64) + +define @test_vlseg6_nxv1i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i64) + +define @test_vlseg7_nxv1i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8_nxv1i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue 
{,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i64) + +define @test_vlseg2_nxv2i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i64) + +define @test_vlseg3_nxv2i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i64) + +define @test_vlseg4_nxv2i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, 
%1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i64) + +define @test_vlseg5_nxv2i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i64) + +define @test_vlseg6_nxv2i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i64) + +define @test_vlseg7_nxv2i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: 
vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8_nxv2i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i64) + +define @test_vlseg2_nxv8i32(i32* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8i32(i32* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv32i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i64) + +define @test_vlseg2_nxv32i8(i8* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vlseg2e8.v 
v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv32i8(i8* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu +; CHECK-NEXT: vlseg2e8.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vlseg2e8.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, , i64) + +define @test_vlseg2_nxv2i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2i16(i16* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i64) + +define @test_vlseg3_nxv2i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i64) + +define @test_vlseg4_nxv2i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; 
CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i64) + +define @test_vlseg5_nxv2i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , i64) + +define @test_vlseg6_nxv2i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail 
call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i64) + +define @test_vlseg7_nxv2i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i64) + +define @test_vlseg8_nxv2i16(i16* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2i16(i16* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2i64(i64* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i64(,, i64*, , i64) + +define @test_vlseg2_nxv2i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl) + %1 = 
extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i64( %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2i64(i64* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i64(,,, i64*, , i64) + +define @test_vlseg3_nxv2i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg3e64.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i64( %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i64(,,,, i64*, , i64) + +define @test_vlseg4_nxv2i64(i64* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg4e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2i64(i64* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg4e64.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg4e64.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv16f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i64) + +define @test_vlseg2_nxv16f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, 
(a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv16f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv4f64(double* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i64) + +define @test_vlseg2_nxv4f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv4f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlseg2e64.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1f64(double* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i64) + +define @test_vlseg2_nxv1f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg2e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1f64(double* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i64) + +define @test_vlseg3_nxv1f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg3e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1f64(double* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i64) + +define @test_vlseg4_nxv1f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg4e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i64) + +define @test_vlseg5_nxv1f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg5e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i64 
%vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i64) + +define @test_vlseg6_nxv1f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg6e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i64) + +define @test_vlseg7_nxv1f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg7e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i64) + +define @test_vlseg8_nxv1f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1f64(double* %base, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlseg8_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg8e64.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2f32(float* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i64) + +define @test_vlseg2_nxv2f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2f32(float* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i64) + +define @test_vlseg3_nxv2f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv2f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv2f32(float* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i64) + +define @test_vlseg4_nxv2f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv2f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i64) + +define @test_vlseg5_nxv2f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv2f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i64) + +define @test_vlseg6_nxv2f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv2f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl) + 
%1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i64) + +define @test_vlseg7_nxv2f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv2f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i64) + +define @test_vlseg8_nxv2f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv2f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, , i64) + +define @test_vlseg2_nxv1f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1f16(half* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i64) + +define @test_vlseg3_nxv1f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv1f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1f16(half* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i64) + +define @test_vlseg4_nxv1f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i64) + +define @test_vlseg5_nxv1f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i64) + +define @test_vlseg6_nxv1f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i64) + +define @test_vlseg7_nxv1f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i64) + +define @test_vlseg8_nxv1f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv1f32(float* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i64) + +define @test_vlseg2_nxv1f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv1f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv1f32(float* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i64) + +define @test_vlseg3_nxv1f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + 
+define @test_vlseg3_mask_nxv1f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv1f32(float* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i64) + +define @test_vlseg4_nxv1f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv1f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i64) + +define @test_vlseg5_nxv1f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv1f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i64) + +define @test_vlseg6_nxv1f32(float* %base, i64 %vl) { +; CHECK-LABEL: 
test_vlseg6_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv1f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i64) + +define @test_vlseg7_nxv1f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv1f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i64) + +define @test_vlseg8_nxv1f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg8_mask_nxv1f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: 
vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i64) + +define @test_vlseg2_nxv8f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv8f16(half* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, half*, , i64) + +define @test_vlseg3_nxv8f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv8f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv8f16(half* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i64) + +define @test_vlseg4_nxv8f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue 
{,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv8f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv8f32(float* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i64) + +define @test_vlseg2_nxv8f32(float* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv8f32(float* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0) +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlseg2.nxv2f64(double* , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i64) + +define @test_vlseg2_nxv2f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlseg2_mask_nxv2f64(double* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0) +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg2e64.v v14, (a0), v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlseg3.nxv2f64(double* , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i64) + +define @test_vlseg3_nxv2f64(double* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64.v v14, (a0) +; CHECK-NEXT: # kill: def $v16m2 killed 
$v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg3_mask_nxv2f64(double* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* , i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlseg4_nxv2f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg4_mask_nxv2f64(double* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg2_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg2_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg3_nxv4f16(half* %base, i64
%vl) { +; CHECK-LABEL: test_vlseg3_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlseg3_mask_nxv4f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlseg4.nxv4f16(half* , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i64) + +define @test_vlseg4_nxv4f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlseg4_mask_nxv4f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i64) + +define @test_vlseg5_nxv4f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlseg5_mask_nxv4f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} 
@llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i64) + +define @test_vlseg6_nxv4f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg6_mask_nxv4f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, half*, , i64) + +define @test_vlseg7_nxv4f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlseg7_mask_nxv4f16(half* %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0) +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i64) + +define @test_vlseg8_nxv4f16(half* %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16.v v15, (a0) +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlseg8_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg2_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg2_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg3_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg3_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg4_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg4_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg5_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg5_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg6_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg6_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg7_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg7_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg8_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg8_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* , i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlseg2_nxv4f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg2_mask_nxv4f32(float* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* , i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlseg3_nxv4f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg3_mask_nxv4f32(float* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* , i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlseg4_nxv4f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg4_mask_nxv4f32(float* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+