diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -524,6 +524,30 @@
                                  llvm_anyint_ty]),
                     [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For unit stride fault-only-first segment load
+  // Input: (pointer, vl)
+  // Output: (data, vl)
+  // NOTE: We model this with default memory properties since we model writing
+  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
+  class RISCVUSSegLoadFF<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1)), [llvm_anyint_ty]),
+                    [LLVMPointerToElt<0>, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
+  // For unit stride fault-only-first segment load with mask
+  // Input: (maskedoff, pointer, mask, vl)
+  // Output: (data, vl)
+  // NOTE: We model this with default memory properties since we model writing
+  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
+  class RISCVUSSegLoadFFMask<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1)), [llvm_anyint_ty]),
+                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                                [LLVMPointerToElt<0>,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic;
+
   // For stride segment load
   // Input: (pointer, offset, vl)
   class RISCVSSegLoad<int nf>
@@ -731,6 +755,10 @@
     def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
   }
+  multiclass RISCVUSSegLoadFF<int nf> {
+    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
+  }
   multiclass RISCVSSegLoad<int nf> {
     def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
@@ -1048,6 +1076,7 @@
   foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
     defm vlseg # nf : RISCVUSSegLoad<nf>;
+    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
     defm vlsseg # nf : RISCVSSegLoad<nf>;
     defm vloxseg # nf : RISCVISegLoad<nf>;
     defm vluxseg # nf : RISCVISegLoad<nf>;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -57,6 +57,8 @@
   void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVLSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVLSEGFF(SDNode *Node);
+  void selectVLSEGFFMask(SDNode *Node);
   void selectVLXSEG(SDNode *Node, unsigned IntNo);
   void selectVLXSEGMask(SDNode *Node, unsigned IntNo);
   void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -222,6 +222,70 @@
   CurDAG->RemoveDeadNode(Node);
 }
 
+void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node) {
+  SDLoc DL(Node);
+  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+  unsigned NF = Node->getNumValues() - 2; // Do not count Chain and Glue.
+  EVT VT = Node->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 5> Operands;
+  Operands.push_back(Node->getOperand(2)); // Base pointer.
+  Operands.push_back(Node->getOperand(3)); // VL.
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+  SDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other,
+                                        MVT::Glue, Operands);
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I)
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+                                               VT, SuperReg));
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // Chain.
+  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Glue.
+  CurDAG->RemoveDeadNode(Node);
+}
+
+void RISCVDAGToDAGISel::selectVLSEGFFMask(SDNode *Node) {
+  SDLoc DL(Node);
+  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+  unsigned NF = Node->getNumValues() - 2; // Do not count Chain and Glue.
+  EVT VT = Node->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
+  SmallVector<SDValue, 7> Operands;
+  Operands.push_back(MaskedOff);
+  Operands.push_back(Node->getOperand(NF + 2)); // Base pointer.
+  Operands.push_back(Node->getOperand(NF + 3)); // Mask.
+  Operands.push_back(Node->getOperand(NF + 4)); // VL.
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+  SDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other,
+                                        MVT::Glue, Operands);
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I)
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+                                               VT, SuperReg));
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // Chain.
+  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Glue.
+  CurDAG->RemoveDeadNode(Node);
+}
+
 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned IntNo) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumValues() - 1;
@@ -699,6 +763,14 @@
     }
     break;
   }
+  case RISCVISD::VLSEGFF: {
+    selectVLSEGFF(Node);
+    return;
+  }
+  case RISCVISD::VLSEGFF_MASK: {
+    selectVLSEGFFMask(Node);
+    return;
+  }
   }
 
   // Select the default instruction.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -98,6 +98,9 @@
   // Unit-stride fault-only-first load
   VLEFF,
   VLEFF_MASK,
+  // Unit-stride fault-only-first segment load
+  VLSEGFF,
+  VLSEGFF_MASK,
   // read vl CSR
   READ_VL,
 };
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1374,6 +1374,7 @@
     }
   }
 
+  unsigned NF = 1;
   switch (IntNo) {
   default:
     return SDValue(); // Don't custom lower most intrinsics.
@@ -1396,6 +1397,88 @@
     SDValue ReadVL = DAG.getNode(RISCVISD::READ_VL, DL, VTs, Load.getValue(2));
     return DAG.getMergeValues({Load, ReadVL, Load.getValue(1)}, DL);
   }
+  case Intrinsic::riscv_vlseg8ff:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg7ff:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg6ff:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg5ff:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg4ff:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg3ff:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg2ff: {
+    NF++;
+    SDLoc DL(Op);
+    SmallVector<EVT, 10> EVTs(NF, Op.getValueType());
+    EVTs.push_back(MVT::Other);
+    EVTs.push_back(MVT::Glue);
+    SDVTList VTs = DAG.getVTList(EVTs);
+    SDValue Load =
+        DAG.getNode(RISCVISD::VLSEGFF, DL, VTs, Op.getOperand(0),
+                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+    VTs = DAG.getVTList(Op->getValueType(NF), MVT::Other);
+    SDValue ReadVL = DAG.getNode(RISCVISD::READ_VL, DL, VTs,
+                                 /*Glue*/ Load.getValue(NF + 1));
+    SmallVector<SDValue, 10> LoadValues;
+    for (unsigned i = 0; i < NF; ++i)
+      LoadValues.push_back(Load.getValue(i));
+    LoadValues.push_back(ReadVL);
+    LoadValues.push_back(Load.getValue(NF)); // Chain.
+    return DAG.getMergeValues(LoadValues, DL);
+  }
+  case Intrinsic::riscv_vlseg8ff_mask:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg7ff_mask:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg6ff_mask:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg5ff_mask:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg4ff_mask:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg3ff_mask:
+    NF++;
+    LLVM_FALLTHROUGH;
+  case Intrinsic::riscv_vlseg2ff_mask: {
+    NF++;
+    SDLoc DL(Op);
+    SmallVector<EVT, 10> EVTs(NF, Op.getValueType());
+    EVTs.push_back(MVT::Other);
+    EVTs.push_back(MVT::Glue);
+    SDVTList VTs = DAG.getVTList(EVTs);
+    SmallVector<SDValue, 13> LoadOps;
+    LoadOps.push_back(Op.getOperand(0)); // Chain.
+    LoadOps.push_back(Op.getOperand(1)); // Intrinsic ID.
+    for (unsigned i = 0; i < NF; ++i)
+      LoadOps.push_back(Op.getOperand(2 + i)); // MaskedOff.
+    LoadOps.push_back(Op.getOperand(2 + NF)); // Base.
+    LoadOps.push_back(Op.getOperand(3 + NF)); // Mask.
+    LoadOps.push_back(Op.getOperand(4 + NF)); // VL.
+    SDValue Load = DAG.getNode(RISCVISD::VLSEGFF_MASK, DL, VTs, LoadOps);
+    VTs = DAG.getVTList(Op->getValueType(NF), MVT::Other);
+    SDValue ReadVL = DAG.getNode(RISCVISD::READ_VL, DL, VTs,
+                                 /*Glue*/ Load.getValue(NF + 1));
+    SmallVector<SDValue, 10> LoadValues;
+    for (unsigned i = 0; i < NF; ++i)
+      LoadValues.push_back(Load.getValue(i));
+    LoadValues.push_back(ReadVL);
+    LoadValues.push_back(Load.getValue(NF)); // Chain.
+    return DAG.getMergeValues(LoadValues, DL);
+  }
   }
 }
@@ -3846,6 +3929,8 @@
   NODE_NAME_CASE(TRUNCATE_VECTOR)
   NODE_NAME_CASE(VLEFF)
   NODE_NAME_CASE(VLEFF_MASK)
+  NODE_NAME_CASE(VLSEGFF)
+  NODE_NAME_CASE(VLSEGFF_MASK)
   NODE_NAME_CASE(READ_VL)
   }
   // clang-format on
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -474,14 +474,15 @@
 }
 
 class ToLowerCase<string Upper> {
-  string L = !subst("VLSEG", "vlseg",
+  string L = !subst("FF", "ff",
+             !subst("VLSEG", "vlseg",
              !subst("VLSSEG", "vlsseg",
              !subst("VSSEG", "vsseg",
              !subst("VSSSEG", "vssseg",
              !subst("VLOXSEG", "vloxseg",
              !subst("VLUXSEG", "vluxseg",
              !subst("VSOXSEG", "vsoxseg",
-             !subst("VSUXSEG", "vsuxseg", Upper))))))));
+             !subst("VSUXSEG", "vsuxseg", Upper)))))))));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1840,15 +1841,18 @@
   defm _W : VPseudoConversion;
 }
 
-multiclass VPseudoUSSegLoad {
+multiclass VPseudoUSSegLoad<bit isFF> {
   foreach eew = EEWList in {
     foreach lmul = MxSet<eew>.m in {
       defvar LInfo = lmul.MX;
       let VLMul = lmul.value in {
         foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
-          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegLoadNoMask<vreg, eew>;
-          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegLoadMask<vreg, eew>;
+          defvar FFStr = !if(isFF, "FF", "");
+          def nf # "E" # eew # FFStr # "_V_" # LInfo :
+            VPseudoUSSegLoadNoMask<vreg, eew>;
+          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
+            VPseudoUSSegLoadMask<vreg, eew>;
         }
       }
     }
@@ -3199,7 +3203,7 @@
 //===----------------------------------------------------------------------===//
 // 7.8. Vector Load/Store Segment Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVLSEG : VPseudoUSSegLoad;
+defm PseudoVLSEG : VPseudoUSSegLoad<false>;
 defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVLOXSEG : VPseudoISegLoad;
 defm PseudoVLUXSEG : VPseudoISegLoad;
@@ -3208,6 +3212,10 @@
 defm PseudoVSOXSEG : VPseudoISegStore;
 defm PseudoVSUXSEG : VPseudoISegStore;
 
+// vlseg<nf>e<eew>ff.v may update VL register
+let hasSideEffects = 1, Defs = [VL] in
+defm PseudoVLSEG : VPseudoUSSegLoad<true>;
+
 //===----------------------------------------------------------------------===//
 // 8. Vector AMO Operations
 //===----------------------------------------------------------------------===//
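The test files below exercise the new intrinsics directly from IR. As a compact reference for the shape produced by the RISCVUSSegLoadFF class above (the trailing i32 result on riscv32 carries the VL written by the faulting load, which is why the NOTE in IntrinsicsRISCV.td keeps the default memory properties), here is a minimal unmasked nf=2 sketch. The declaration mirrors the ones used in the vlsegff-rv32 tests that follow; the function and value names are purely illustrative.

; Unmasked two-field fault-only-first segment load of <vscale x 1 x i8>.
; Results are (field 0, field 1, new vl); operands are (base pointer, requested vl).
declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i32)

define i32 @sample_vl_after_ff(i8* %base, i32 %avl) {
entry:
  %r = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i32 %avl)
  ; Index 2 of the result struct is the post-fault VL, read back via READ_VL (csrr ..., vl).
  %newvl = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %r, 2
  ret i32 %newvl
}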
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32)
+
+define void @test_vlseg2ff_dead_value(i16* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_dead_value:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlseg2e16ff.v v0, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
+  store i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, i16* %base, i32 %vl, <vscale x 16 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
+  store i32 %1, i32* %outvl
+  ret void
+}
+
+define <vscale x 16 x i16> @test_vlseg2ff_dead_vl(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2ff_dead_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(<vscale x 16 x i16> %val, i16* %base, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv4r.v v4, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define void @test_vlseg2ff_dead_all(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2ff_dead_all:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlseg2e16ff.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl)
+  ret void
+}
+
+define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, i16* %base, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v12, v8
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -0,0 +1,5239 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32) + +define @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv16i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i32) + +define @test_vlseg2ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i32) + +define @test_vlseg3ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,, i32} 
%0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i32) + +define @test_vlseg4ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i32) + +define @test_vlseg5ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* 
%outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i32) + +define @test_vlseg6ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i32) + +define @test_vlseg7ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 
+; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i32) + +define @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv16i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i32) + +define @test_vlseg3ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv16i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv16i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i32) + +define @test_vlseg4ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv16i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i32) + +define @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(i32* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i32) + +define @test_vlseg3ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 
= extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(i32* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i32) + +define @test_vlseg4ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(i32* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i32) + +define @test_vlseg5ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} 
%0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32(i32* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i32) + +define @test_vlseg6ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(i32* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i32) + +define @test_vlseg7ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(i32* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i32) + +define @test_vlseg8ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i32) + +define @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i32) + +define @test_vlseg3ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(i16* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i32) + +define @test_vlseg4ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16(i16* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i32) + +define @test_vlseg5ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(i16* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i32) + +define @test_vlseg6ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw 
a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(i16* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i32) + +define @test_vlseg7ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(i16* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i32) + +define @test_vlseg8ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i32) + +define @test_vlseg2ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(i32* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i32) + +define @test_vlseg3ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(i32* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i32) + +define 
@test_vlseg4ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(i32* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i32) + +define @test_vlseg5ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(i32* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i32) + +define @test_vlseg6ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: 
test_vlseg6ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(i32* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i32) + +define @test_vlseg7ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(i32* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i32) + +define @test_vlseg8ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i32) + +define @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i32) + +define @test_vlseg3ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv8i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16(i16* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i32) + +define @test_vlseg4ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, 
i32} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv8i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i32) + +define @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i32) + +define @test_vlseg3ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} 
@llvm.riscv.vlseg4ff.nxv8i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i32) + +define @test_vlseg4ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i32) + +define @test_vlseg5ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i32) + +define @test_vlseg6ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv8i8( %val, i8* 
%base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i32) + +define @test_vlseg7ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i32) + +define @test_vlseg2ff_nxv8i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i32) + +define @test_vlseg2ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i32) + +define @test_vlseg3ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue 
{,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i32) + +define @test_vlseg4ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i32) + +define @test_vlseg5ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv4i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + 
+declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i32) + +define @test_vlseg6ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv4i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i32) + +define @test_vlseg7ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv4i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv4i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i32) + +define @test_vlseg2ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i32) + +define @test_vlseg3ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} 
@llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(i16* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i32) + +define @test_vlseg4ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(i16* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i32) + +define @test_vlseg5ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(i16* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i32) + +define @test_vlseg6ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16(i16* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i32) + +define @test_vlseg7ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(i16* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i32) + +define @test_vlseg8ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; 
CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i32) + +define @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv32i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i32) + +define @test_vlseg2ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i32) + +define @test_vlseg3ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf4,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i32) + +define @test_vlseg4ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i32) + +define @test_vlseg5ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: 
# kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i32) + +define @test_vlseg6ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i32) + +define @test_vlseg7ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i32) + +define @test_vlseg8ff_nxv2i8(i8* %base, i32 %vl, 
i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i32) + +define @test_vlseg2ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i32) + +define @test_vlseg3ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 3
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg4ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg4ff_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 4
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 4
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg5ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg5ff_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 5
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 5
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg6ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg6ff_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 6
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 6
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg7ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg7ff_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 7
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 7
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg8ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg8ff_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 8
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
+  %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 8
+  store i32 %2, i32* %outvl
+  ret <vscale x 2 x i16> %1
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlseg2e32ff.v v6, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
+  %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
+  store i32 %2, i32* %outvl
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlseg2e32ff.v v6, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
+  %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
+  store i32 %2, i32* %outvl
+  ret <vscale x 4 x i32> %1
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.nxv4i32(i32* , i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlseg3ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlseg3e32ff.v v6, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
+  %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 3
+  store i32 %2, i32* %outvl
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlseg3e32ff.v v6, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 =
extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(i32* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i32) + +define @test_vlseg4ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i32) + +define @test_vlseg2ff_nxv16f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv16f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(double* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i32) + +define @test_vlseg2ff_nxv4f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} 
+ +define @test_vlseg2ff_mask_nxv4f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(double* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, double*, , i32) + +define @test_vlseg2ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(double* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, double*, , i32) + +define @test_vlseg3ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(double* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, double*, , i32) + +define @test_vlseg4ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: 
test_vlseg4ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(double* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, double*, , i32) + +define @test_vlseg5ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(double* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, double*, , i32) + +define @test_vlseg6ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(double* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i32) + +define @test_vlseg7ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64(double* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i32) + +define @test_vlseg8ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64(double* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i32) + +define @test_vlseg2ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(float* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i32) + +define @test_vlseg3ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32(float* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i32) + +define @test_vlseg4ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} 
@llvm.riscv.vlseg4ff.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(float* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i32) + +define @test_vlseg5ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(float* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i32) + +define @test_vlseg6ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(float* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i32) + +define @test_vlseg7ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32(float* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i32) + +define @test_vlseg8ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} 
@llvm.riscv.vlseg2ff.nxv1f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i32) + +define @test_vlseg2ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i32) + +define @test_vlseg3ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i32) + +define @test_vlseg4ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v 
v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(half* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i32) + +define @test_vlseg5ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(half* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i32) + +define @test_vlseg6ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(half* , i32) 
+declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i32) + +define @test_vlseg7ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(half* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i32) + +define @test_vlseg8ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i32) + +define @test_vlseg2ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(float* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i32) + +define @test_vlseg3ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32(float* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i32) + +define @test_vlseg4ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, 
%mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(float* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i32) + +define @test_vlseg5ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(float* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i32) + +define @test_vlseg6ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(float* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i32) + +define @test_vlseg7ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(float* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i32) + +define @test_vlseg8ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i32) + +define @test_vlseg2ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) 
{ +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i32) + +define @test_vlseg3ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv8f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i32) + +define @test_vlseg4ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv8f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i32) + +define @test_vlseg2ff_nxv8f32(float* %base, i32 %vl, 
i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(double* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i32) + +define @test_vlseg2ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(double* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i32) + +define @test_vlseg3ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: 
# kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(double* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , i32) + +define @test_vlseg4ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2f64( %val, double* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i32) + +define @test_vlseg2ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i32) + +define @test_vlseg3ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + 
%0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i32) + +define @test_vlseg4ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(half* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i32) + +define @test_vlseg5ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv4f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} 
@llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(half* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i32) + +define @test_vlseg6ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv4f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(half* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i32) + +define @test_vlseg7ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv4f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(half* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i32) + +define @test_vlseg8ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: 
test_vlseg8ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv4f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i32) + +define @test_vlseg2ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i32) + +define @test_vlseg3ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i32) + +define @test_vlseg4ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,, i32} %0, 1 + %2 = extractvalue {,,,, i32} %0, 4 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(half* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i32) + +define @test_vlseg5ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,, i32} %0, 1 + %2 = extractvalue {,,,,, i32} %0, 5 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(half* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i32) + +define 
@test_vlseg6ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,, i32} %0, 6 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(half* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i32) + +define @test_vlseg7ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,, i32} %0, 7 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(half* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i32) + +define @test_vlseg8ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i32 %vl) + 
%1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2f16( %val, half* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,, i32} %0, 1 + %2 = extractvalue {,,,,,,,, i32} %0, 8 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i32) + +define @test_vlseg2ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i32 %vl) + %1 = extractvalue {,, i32} %0, 1 + %2 = extractvalue {,, i32} %0, 2 + store i32 %2, i32* %outvl + ret %1 +} + +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(float* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i32) + +define @test_vlseg3ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i32 %vl) + %1 = extractvalue {,,, i32} %0, 1 + %2 = extractvalue {,,, i32} %0, 3 + store i32 %2, i32* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4f32( %val, float* %base, i32 %vl, %mask, i32* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i32} 
@llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
+  %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 3
+  store i32 %2, i32* %outvl
+  ret <vscale x 4 x float> %1
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.nxv4f32(float* , i32)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @test_vlseg4ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) {
+; CHECK-LABEL: test_vlseg4ff_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
+  %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 4
+  store i32 %2, i32* %outvl
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
+; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
+  %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 4
+  store i32 %2, i32* %outvl
+  ret <vscale x 4 x float> %1
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64)
+
+define void @test_vlseg2ff_dead_value(i16* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_dead_value:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16ff.v v0, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store i64 %1, i64* %outvl
+  ret void
+}
+
+define <vscale x 16 x i16> @test_vlseg2ff_dead_vl(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2ff_dead_vl:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv4r.v v4, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define void @test_vlseg2ff_dead_all(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2ff_dead_all:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16ff.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT: vmv4r.v v12, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
@@ -0,0 +1,5681 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
+  %2 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store i64 %2, i64* %outvl
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv4r.v v4, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
+  %2 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store i64 %2, i64* %outvl
+  ret <vscale x 16 x i16> %1
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
+; CHECK-NEXT:
csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(i32* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, i32*, , i64) + +define @test_vlseg3ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(i32* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i64) + +define @test_vlseg4ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} 
@llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i64) + +define @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv16i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i64) + +define @test_vlseg3ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv16i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i64) + +define @test_vlseg4ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + 
ret %1 +} + +define @test_vlseg4ff_mask_nxv16i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i64) + +define @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(i64* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(,,, i64*, , i64) + +define @test_vlseg3ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64( %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(i64* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(,,,, i64*, , i64) + +define @test_vlseg4ff_nxv1i64(i64* 
%base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg4e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(i64* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(,,,,, i64*, , i64) + +define @test_vlseg5ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg5e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(i64* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(,,,,,, i64*, , i64) + +define @test_vlseg6ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg6e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(i64* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(,,,,,,, i64*, , i64) + +define @test_vlseg7ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(i64* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(,,,,,,,, i64*, , i64) + +define @test_vlseg8ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: 
ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i64) + +define @test_vlseg2ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(i32* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i64) + +define @test_vlseg3ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(i32* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i64) + +define @test_vlseg4ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = 
extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(i32* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i64) + +define @test_vlseg5ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(i32* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i64) + +define @test_vlseg6ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} 
@llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(i32* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i64) + +define @test_vlseg7ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(i32* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i64) + +define @test_vlseg8ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i64) + +define 
@test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i64) + +define @test_vlseg3ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv8i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i64) + +define @test_vlseg4ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv8i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; 
CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i64) + +define @test_vlseg2ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i64) + +define @test_vlseg3ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i64) + +define @test_vlseg4ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(i8* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i64) + +define @test_vlseg5ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv4i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i64) + +define @test_vlseg6ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv4i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i64) + +define @test_vlseg7ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv4i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv4i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i64) + 
+define @test_vlseg2ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i64) + +define @test_vlseg3ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i64) + +define @test_vlseg4ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; 
CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(i16* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i64) + +define @test_vlseg5ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(i16* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i64) + +define @test_vlseg6ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(i16* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i64) + +define @test_vlseg7ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) 
{ +; CHECK-LABEL: test_vlseg7ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(i16* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i64) + +define @test_vlseg8ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i64) + +define @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i64 %vl) + %1 = 
extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(i32* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i64) + +define @test_vlseg3ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(i32* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i64) + +define @test_vlseg4ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(i32* , i64) +declare 
{,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i64) + +define @test_vlseg5ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(i32* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i64) + +define @test_vlseg6ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(i32* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i64) + +define @test_vlseg7ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue 
{,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32(i32* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i64) + +define @test_vlseg8ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i64) + +define @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i64) + +define @test_vlseg3ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i64) + +define @test_vlseg4ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(i8* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i64) + +define @test_vlseg5ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i64 %vl) + %1 = 
extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i64) + +define @test_vlseg6ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i64) + +define @test_vlseg7ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i64) + +define @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i64) + +define @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i64) + +define @test_vlseg3ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i64) + +define @test_vlseg4ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(i16* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i64) + +define @test_vlseg5ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(i16* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i64) + +define @test_vlseg6ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(i16* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i64) + +define @test_vlseg7ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; 
CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(i16* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i64) + +define @test_vlseg8ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i64) + +define @test_vlseg2ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1i8( %val, i8* %base, i64 %vl, %mask, 
i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i64) + +define @test_vlseg3ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i64) + +define @test_vlseg4ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(i8* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i64) + +define @test_vlseg5ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i64) + +define @test_vlseg6ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i64) + +define @test_vlseg7ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; 
CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i64) + +define @test_vlseg2ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} 
@llvm.riscv.vlseg3ff.nxv2i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i64) + +define @test_vlseg3ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i64) + +define @test_vlseg4ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(i8* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i64) + +define @test_vlseg5ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i64) + +define @test_vlseg6ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i64) + +define @test_vlseg7ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 
+ %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i64) + +define @test_vlseg8ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i64) + +define @test_vlseg2ff_nxv8i32(i32* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i64) + +define @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vlseg2e8ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i64 
%vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv32i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i64) + +define @test_vlseg2ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i64) + +define @test_vlseg3ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, i16*, , i64) + +define 
@test_vlseg4ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(i16* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, i16*, , i64) + +define @test_vlseg5ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(i16* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, i16*, , i64) + +define @test_vlseg6ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: 
test_vlseg6ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(i16* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, i16*, , i64) + +define @test_vlseg7ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(i16* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, i16*, , i64) + +define @test_vlseg8ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i64) + +define @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(i64* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(,,, i64*, , i64) + +define @test_vlseg3ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64( %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64(i64* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64(,,,, i64*, , i64) + +define @test_vlseg4ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, 
i64} @llvm.riscv.vlseg4ff.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i64) + +define @test_vlseg2ff_nxv16f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv16f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(double* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i64) + +define @test_vlseg2ff_nxv4f64(double* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4f64( %val, double* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.nxv1f64(double* , i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg2ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 2
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl, <vscale x 1 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 2
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.nxv1f64(double* , i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg3ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 3
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl, <vscale x 1 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 3
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.nxv1f64(double* , i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg4ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg4ff_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 4
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl, <vscale x 1 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 4
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.nxv1f64(double* , i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg5ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg5ff_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 5
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl, <vscale x 1 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 5
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.nxv1f64(double* , i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg6ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) {
+; CHECK-LABEL: test_vlseg6ff_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 6
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl, <vscale x 1 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sd a0, 0(a2)
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 1
+  %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} %0, 6
+  store i64 %2, i64* %outvl
+  ret <vscale x 1 x double> %1
+} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(double* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i64) + +define @test_vlseg7ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1f64( %val, double* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64(double* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i64) + +define @test_vlseg8ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64(double* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1f64( %val, double* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f32(float* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i64) + +define @test_vlseg2ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: 
vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(float* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i64) + +define @test_vlseg3ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(float* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i64) + +define @test_vlseg4ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} 
@llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(float* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i64) + +define @test_vlseg5ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(float* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i64) + +define @test_vlseg6ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32(float* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i64) + +define @test_vlseg7ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg7e32ff.v 
v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(float* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i64) + +define @test_vlseg8ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i64) + +define @test_vlseg2ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define 
@test_vlseg2ff_mask_nxv1f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(half* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i64) + +define @test_vlseg3ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(half* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i64) + +define @test_vlseg4ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(half* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i64) + +define 
@test_vlseg5ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(half* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i64) + +define @test_vlseg6ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(half* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i64) + +define @test_vlseg7ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret 
%1 +} + +define @test_vlseg7ff_mask_nxv1f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(half* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i64) + +define @test_vlseg8ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(float* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i64) + +define @test_vlseg2ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv1f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 
+; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(float* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i64) + +define @test_vlseg3ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv1f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(float* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i64) + +define @test_vlseg4ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv1f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(float* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i64) + +define @test_vlseg5ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv1f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(float* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i64) + +define @test_vlseg6ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv1f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(float* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i64) + +define @test_vlseg7ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv1f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(float* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i64) + +define @test_vlseg8ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv1f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i64) + +define @test_vlseg2ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(half* , i64) +declare {,,, i64} 
@llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i64) + +define @test_vlseg3ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv8f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(half* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i64) + +define @test_vlseg4ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv8f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(float* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i64) + +define @test_vlseg2ff_nxv8f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv8f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(double* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i64) + +define @test_vlseg2ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2f64( %val, double* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(double* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i64) + +define @test_vlseg3ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2f64( %val, double* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(double* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , i64) + +define @test_vlseg4ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; 
CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2f64( %val, double* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i64) + +define @test_vlseg2ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(half* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i64) + +define @test_vlseg3ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(half* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i64) + +define @test_vlseg4ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv4f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(half* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i64) + +define @test_vlseg5ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv4f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(half* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i64) + +define @test_vlseg6ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv4f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(half* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i64) + +define @test_vlseg7ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv4f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(half* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i64) + +define @test_vlseg8ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv4f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i64) + +define @test_vlseg2ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv2f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(half* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i64) + +define @test_vlseg3ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(half* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv2f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(half* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i64) + +define @test_vlseg4ff_nxv2f16(half* 
%base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg4ff_mask_nxv2f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,, i64} %0, 1 + %2 = extractvalue {,,,, i64} %0, 4 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(half* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i64) + +define @test_vlseg5ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg5ff_mask_nxv2f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,, i64} %0, 1 + %2 = extractvalue {,,,,, i64} %0, 5 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(half* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i64) + +define @test_vlseg6ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg6ff_mask_nxv2f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,, i64} %0, 6 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(half* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i64) + +define @test_vlseg7ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg7ff_mask_nxv2f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,, i64} %0, 7 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(half* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i64) + +define @test_vlseg8ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg8ff_mask_nxv2f16( %val, half* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,, i64} %0, 1 + %2 = extractvalue {,,,,,,,, i64} %0, 8 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(float* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i64) + +define @test_vlseg2ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg2ff_mask_nxv4f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,, i64} %0, 1 + %2 = extractvalue {,, i64} %0, 2 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(float* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i64) + +define @test_vlseg3ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +define @test_vlseg3ff_mask_nxv4f32( %val, float* %base, i64 %vl, %mask, i64* %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i64 %vl) + %1 = extractvalue {,,, i64} %0, 1 + %2 = extractvalue {,,, i64} %0, 3 + store i64 %2, i64* %outvl + ret %1 +} + +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32(float* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, float*, , i64) + +define @test_vlseg4ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: ret +entry: + %0 
= tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} %0, 1
+  %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} %0, 4
+  store i64 %2, i64* %outvl
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %vl, <vscale x 4 x i1> %mask, i64* %outvl) {
+; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlseg4e32ff.v v6, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} %0, 1
+  %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} %0, 4
+  store i64 %2, i64* %outvl
+  ret <vscale x 4 x float> %1
+}
+