diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -502,6 +502,25 @@
                                  llvm_anyint_ty]),
                     [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For stride segment load
+  // Input: (pointer, offset, vl)
+  class RISCVSSegLoad<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For stride segment load with mask
+  // Input: (maskedoff, pointer, offset, mask, vl)
+  class RISCVSSegLoadMask<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                                [LLVMPointerToElt<0>,
+                                 llvm_anyint_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+
   // For unit stride segment store
   // Input: (value, pointer, vl)
   class RISCVUSSegStore<int nf>
@@ -627,6 +646,10 @@
     def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
   }
+  multiclass RISCVSSegLoad<int nf> {
+    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
+  }
   multiclass RISCVUSSegStore<int nf> {
     def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
  }
@@ -924,6 +947,7 @@
 
   foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
     defm vlseg # nf : RISCVUSSegLoad<nf>;
+    defm vlsseg # nf : RISCVSSegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
   }
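For orientation (editor's illustration, mirroring the declarations in the new test file below): instantiated at nf = 2 with nxv1i8, the two classes above surface in LLVM IR as the following declarations, where the two trailing integer operands are the byte stride and the VL:

    declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8*, i32, i32)
    declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)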
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -55,8 +55,8 @@
   bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
   bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
 
-  void selectVLSEG(SDNode *Node, unsigned IntNo);
-  void selectVLSEGMask(SDNode *Node, unsigned IntNo);
+  void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVLSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVSSEG(SDNode *Node, unsigned IntNo);
   void selectVSSEGMask(SDNode *Node, unsigned IntNo);
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -149,7 +149,8 @@
   }
 }
 
-void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo,
+                                    bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumValues() - 1;
   EVT VT = Node->getValueType(0);
@@ -157,9 +158,16 @@
   MVT XLenVT = Subtarget->getXLenVT();
   RISCVVLMUL LMUL = getLMUL(VT);
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-  SDValue Operands[] = {Node->getOperand(2), // Base pointer.
-                        Node->getOperand(3), // VL.
-                        SEW, Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 5> Operands;
+  Operands.push_back(Node->getOperand(2)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(3)); // Stride.
+    Operands.push_back(Node->getOperand(4)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Load =
@@ -174,7 +182,8 @@
   CurDAG->RemoveDeadNode(Node);
 }
 
-void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo,
+                                        bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumValues() - 1;
   EVT VT = Node->getValueType(0);
@@ -184,12 +193,19 @@
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {MaskedOff,
-                        Node->getOperand(NF + 2), // Base pointer.
-                        Node->getOperand(NF + 3), // Mask.
-                        Node->getOperand(NF + 4), // VL.
-                        SEW,
-                        Node->getOperand(0)}; // Chain.
+  SmallVector<SDValue, 7> Operands;
+  Operands.push_back(MaskedOff);
+  Operands.push_back(Node->getOperand(NF + 2)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(NF + 3)); // Stride.
+    Operands.push_back(Node->getOperand(NF + 4)); // Mask.
+    Operands.push_back(Node->getOperand(NF + 5)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(NF + 3)); // Mask.
+    Operands.push_back(Node->getOperand(NF + 4)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Load =
@@ -377,7 +393,7 @@
   case Intrinsic::riscv_vlseg6:
   case Intrinsic::riscv_vlseg7:
   case Intrinsic::riscv_vlseg8: {
-    selectVLSEG(Node, IntNo);
+    selectVLSEG(Node, IntNo, /*IsStrided=*/false);
     return;
   }
   case Intrinsic::riscv_vlseg2_mask:
@@ -387,7 +403,27 @@
   case Intrinsic::riscv_vlseg6_mask:
   case Intrinsic::riscv_vlseg7_mask:
   case Intrinsic::riscv_vlseg8_mask: {
-    selectVLSEGMask(Node, IntNo);
+    selectVLSEGMask(Node, IntNo, /*IsStrided=*/false);
+    return;
+  }
+  case Intrinsic::riscv_vlsseg2:
+  case Intrinsic::riscv_vlsseg3:
+  case Intrinsic::riscv_vlsseg4:
+  case Intrinsic::riscv_vlsseg5:
+  case Intrinsic::riscv_vlsseg6:
+  case Intrinsic::riscv_vlsseg7:
+  case Intrinsic::riscv_vlsseg8: {
+    selectVLSEG(Node, IntNo, /*IsStrided=*/true);
+    return;
+  }
+  case Intrinsic::riscv_vlsseg2_mask:
+  case Intrinsic::riscv_vlsseg3_mask:
+  case Intrinsic::riscv_vlsseg4_mask:
+  case Intrinsic::riscv_vlsseg5_mask:
+  case Intrinsic::riscv_vlsseg6_mask:
+  case Intrinsic::riscv_vlsseg7_mask:
+  case Intrinsic::riscv_vlsseg8_mask: {
+    selectVLSEGMask(Node, IntNo, /*IsStrided=*/true);
     return;
   }
   }
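To make the operand numbering that selectVLSEGMask decodes concrete: in an intrinsic node for a masked nf = 3 call like the one below (taken from the test file's pattern), operand 0 is the chain, operand 1 the intrinsic ID, operands 2 .. NF+1 the merge values, so the base pointer, stride, mask and VL land at NF+2 .. NF+5 — exactly the indices read above:

    %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)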
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -439,7 +439,8 @@
 class ToLowerCase<string Upper> {
   string L = !subst("VLSEG", "vlseg",
-             !subst("VSSEG", "vsseg", Upper));
+             !subst("VLSSEG", "vlsseg",
+             !subst("VSSEG", "vsseg", Upper)));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1009,6 +1010,40 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSSegLoadNoMask<VReg RetClass, bits<11> EEW>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, GPR:$offset, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSSegLoadMask<VReg RetClass, bits<11> EEW>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
+                  GPR:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoUSSegStoreNoMask<VReg ValClass, bits<11> EEW>:
       Pseudo<(outs),
              (ins ValClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
@@ -1564,6 +1599,21 @@
   }
 }
 
+multiclass VPseudoSSegLoad {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew>;
+        }
+      }
+    }
+  }
+}
+
 multiclass VPseudoUSSegStore {
   foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
@@ -2778,6 +2828,7 @@
 // 7.8. Vector Load/Store Segment Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVLSEG : VPseudoUSSegLoad;
+defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
 
 //===----------------------------------------------------------------------===//
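By analogy with the PseudoVLSEG2E32_V_M2 example quoted above, the new multiclass is expected to expand to pseudo names like the following (illustrative, not an exhaustive list; shown as IR-style comments):

    ; PseudoVLSSEG2E16_V_M4       for int_riscv_vlsseg2      (EEW = 16, LMUL = 4)
    ; PseudoVLSSEG2E16_V_M4_MASK  for int_riscv_vlsseg2_mask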
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
@@ -0,0 +1,4722 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16*, i32, i32)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8*, i32, i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(i8* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8*, i32, i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(i8* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8*, i32, i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(i8* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2,
e32,m1,tu,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv2i32(i32*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, i32*, i32, , i32) + +define @test_vlsseg3_nxv2i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, i32*, i32, , i32) + +define @test_vlsseg4_nxv2i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, i32*, i32, , i32) + +define @test_vlsseg5_nxv2i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 
= tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, i32*, i32, , i32) + +define @test_vlsseg6_nxv2i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, i32*, i32, , i32) + +define @test_vlsseg7_nxv2i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg7e32.v v15, 
(a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, i32*, i32, , i32) + +define @test_vlsseg8_nxv2i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv4i16(i16*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, i16*, i32, , i32) + +define @test_vlsseg2_nxv4i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv4i16(i16*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, i16*, i32, , i32) + +define @test_vlsseg3_nxv4i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, i16*, i32, , i32) + +define @test_vlsseg4_nxv4i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, i16*, i32, , i32) + +define @test_vlsseg5_nxv4i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, i16*, i32, , i32) + +define @test_vlsseg6_nxv4i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, i16*, i32, , i32) + +define @test_vlsseg7_nxv4i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, i16*, i32, , i32) + +define @test_vlsseg8_nxv4i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1i32(i32*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i32, , i32) + +define @test_vlsseg2_nxv1i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1i32(i32*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i32, , i32) + +define @test_vlsseg3_nxv1i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t +; 
CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i32, , i32) + +define @test_vlsseg4_nxv1i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i32, , i32) + +define @test_vlsseg5_nxv1i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i32, , i32) + +define @test_vlsseg6_nxv1i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i32, , i32) + +define @test_vlsseg7_nxv1i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, i32*, i32, , i32) + +define @test_vlsseg8_nxv1i32(i32* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v 
v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv8i16(i16*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i32, , i32) + +define @test_vlsseg2_nxv8i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv8i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv8i16(i16*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i32, , i32) + +define @test_vlsseg3_nxv8i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv8i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i32, , i32) + +define @test_vlsseg4_nxv8i16(i16* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv8i16(i16* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv8i8(i8*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, i8*, i32, , i32) + +define @test_vlsseg2_nxv8i8(i8* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu +; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu +; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu +; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv8i8(i8*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, i8*, i32, , i32) + +define @test_vlsseg3_nxv8i8(i8* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu +; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu +; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu +; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} 
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32*, i32, i32)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(i32* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8*, i32, i32)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i32, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
+  ret <vscale x 32 x i8> %1
+}
+
+define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(i8* %base, i32 %offset, i32 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+  ret <vscale x 32 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32*, i32, i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(i32* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32*, i32, i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(i32* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32*, i32, i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(i32* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half*, i32, i32)
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x half> @test_vlsseg2_nxv16f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
+  ret <vscale x 16 x half> %1
+}
+
+define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(half* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
+  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
+  ret <vscale x 16 x half> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double*, i32, i32)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x double> @test_vlsseg2_nxv4f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(double* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg2_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg3_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg4_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double*, i32, i32)
@llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i32, , i32) + +define @test_vlsseg5_nxv1f64(double* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i32, , i32) + +define @test_vlsseg6_nxv1f64(double* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i32, , i32) + +define @test_vlsseg7_nxv1f64(double* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i32, , i32) + +define @test_vlsseg8_nxv1f64(double* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv2f32(float*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i32, , i32) + +define @test_vlsseg2_nxv2f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 
%offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv2f32(float*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i32, , i32) + +define @test_vlsseg3_nxv2f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(float*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i32, , i32) + +define @test_vlsseg4_nxv2f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i32, , i32) + +define @test_vlsseg5_nxv2f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv2f32(float* 
%base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i32, , i32) + +define @test_vlsseg6_nxv2f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i32, , i32) + +define @test_vlsseg7_nxv2f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i32, , i32) + +define @test_vlsseg8_nxv2f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1f16(half*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i32, , i32) + +define @test_vlsseg2_nxv1f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1f16(half*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i32, , i32) + +define @test_vlsseg3_nxv1f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(half*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i32, , i32) + +define @test_vlsseg4_nxv1f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i32, , i32) + +define @test_vlsseg5_nxv1f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = 
extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i32, , i32) + +define @test_vlsseg6_nxv1f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i32, , i32) + +define @test_vlsseg7_nxv1f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i32, , i32) + +define @test_vlsseg8_nxv1f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; 
CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1f32(float*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i32, , i32) + +define @test_vlsseg2_nxv1f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1f32(float*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i32, , i32) + +define @test_vlsseg3_nxv1f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 
killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(float*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i32, , i32) + +define @test_vlsseg4_nxv1f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i32, , i32) + +define @test_vlsseg5_nxv1f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i32, , i32) + +define @test_vlsseg6_nxv1f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def 
$v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i32, , i32) + +define @test_vlsseg7_nxv1f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i32, , i32) + +define @test_vlsseg8_nxv1f32(float* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; 
CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half*, i32, i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlsseg2_nxv8f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(half* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half*, i32, i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlsseg3_nxv8f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(half* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
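+; With LMUL=2 the segment fields occupy even-numbered register pairs
+; (v14/v16/v18/...), so the merge tuple is built with vmv2r.v and field 1 is
+; returned in $v16m2; the LMUL=1 and fractional-LMUL tests above use
+; consecutive registers v15..v22 with vmv1r.v instead.
+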
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half*, i32, i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlsseg4_nxv8f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(half* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float*, i32, i32)
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x float> @test_vlsseg2_nxv8f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
+  ret <vscale x 8 x float> %1
+}
+
+define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(float* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
+  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float> %1, <vscale x 8 x float> %1, float* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
+  ret <vscale x 8 x float> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double*, i32, i32)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @test_vlsseg2_nxv2f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(double* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>}
@llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv2f64(double*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i32, , i32) + +define @test_vlsseg3_nxv2f64(double* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv2f64(double* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(double*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, double*, i32, , i32) + +define @test_vlsseg4_nxv2f64(double* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg4e64.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv2f64(double* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg4e64.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu +; CHECK-NEXT: vlsseg4e64.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv4f16(half*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, half*, i32, , i32) + +define @test_vlsseg2_nxv4f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = 
extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv4f16(half*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, half*, i32, , i32) + +define @test_vlsseg3_nxv4f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(half*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, half*, i32, , i32) + +define @test_vlsseg4_nxv4f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half*, i32, i32) +declare {,,,,} 
@llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, half*, i32, , i32) + +define @test_vlsseg5_nxv4f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, half*, i32, , i32) + +define @test_vlsseg6_nxv4f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, half*, i32, , i32) + +define @test_vlsseg7_nxv4f16(half* %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg7_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
[... remaining rv32 tests: the same autogenerated load / vmv-copy / tu,mu masked-reload pattern repeats for vlsseg8 of <vscale x 4 x half>, vlsseg2-vlsseg8 of <vscale x 2 x half>, and vlsseg2-vlsseg4 of <vscale x 4 x float> ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
@@ -0,0 +1,5120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+ %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv4i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a2, a2, e32,m2,tu,mu +; CHECK-NEXT: vlsseg4e32.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv16i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, i8*, i64, , i64) + +define @test_vlsseg2_nxv16i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vlsseg2e8.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,m2,ta,mu +; CHECK-NEXT: vlsseg2e8.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,tu,mu +; CHECK-NEXT: vlsseg2e8.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv16i8(i8*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, i8*, i64, , i64) + +define @test_vlsseg3_nxv16i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vlsseg3e8.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,m2,ta,mu +; CHECK-NEXT: vlsseg3e8.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,tu,mu +; CHECK-NEXT: vlsseg3e8.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} 
@llvm.riscv.vlsseg4.nxv16i8(i8*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, i8*, i64, , i64) + +define @test_vlsseg4_nxv16i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vlsseg4e8.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,m2,ta,mu +; CHECK-NEXT: vlsseg4e8.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,tu,mu +; CHECK-NEXT: vlsseg4e8.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1i64(i64*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i64(,, i64*, i64, , i64) + +define @test_vlsseg2_nxv1i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg2e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg2e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg2e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1i64(i64*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i64(,,, i64*, i64, , i64) + +define @test_vlsseg3_nxv1i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg3e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg3e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; 
CHECK-NEXT: vlsseg3e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64(,,,, i64*, i64, , i64) + +define @test_vlsseg4_nxv1i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg4e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg4e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg4e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64(,,,,, i64*, i64, , i64) + +define @test_vlsseg5_nxv1i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64(,,,,,, i64*, i64, , i64) + +define @test_vlsseg6_nxv1i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), 
a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64(,,,,,,, i64*, i64, , i64) + +define @test_vlsseg7_nxv1i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64(,,,,,,,, i64*, i64, , i64) + +define @test_vlsseg8_nxv1i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: 
vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1i32(i32*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i64, , i64) + +define @test_vlsseg2_nxv1i32(i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1i32(i32*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i64, , i64) + +define @test_vlsseg3_nxv1i32(i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i64, , i64) + +define @test_vlsseg4_nxv1i32(i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i64, , i64) + +define @test_vlsseg5_nxv1i32(i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i64, , i64) + +define @test_vlsseg6_nxv1i32(i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, 
a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i64, , i64) + +define @test_vlsseg7_nxv1i32(i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, i32*, i64, , i64) + +define @test_vlsseg8_nxv1i32(i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue 
{,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv8i16(i16*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i64, , i64) + +define @test_vlsseg2_nxv8i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv8i16(i16*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i64, , i64) + +define @test_vlsseg3_nxv8i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i64, , i64) + +define @test_vlsseg4_nxv8i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: 
vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv4i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, i8*, i64, , i64) + +define @test_vlsseg2_nxv4i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,tu,mu +; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv4i8(i8*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, i8*, i64, , i64) + +define @test_vlsseg3_nxv4i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,tu,mu +; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, i8*, i64, , i64) + +define @test_vlsseg4_nxv4i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 
%vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,tu,mu +; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, i8*, i64, , i64) + +define @test_vlsseg5_nxv4i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,tu,mu +; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, i8*, i64, , i64) + +define @test_vlsseg6_nxv4i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,tu,mu +; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} 
@llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, i8*, i64, , i64) + +define @test_vlsseg7_nxv4i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,tu,mu +; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, i8*, i64, , i64) + +define @test_vlsseg8_nxv4i8(i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e8,mf2,ta,mu +; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,tu,mu +; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1i16(i16*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, i16*, i64, , i64) + +define @test_vlsseg2_nxv1i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg2e16.v 
v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1i16(i16*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, i16*, i64, , i64) + +define @test_vlsseg3_nxv1i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, i16*, i64, , i64) + +define @test_vlsseg4_nxv1i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, i16* 
%base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, i16*, i64, , i64) + +define @test_vlsseg5_nxv1i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, i16*, i64, , i64) + +define @test_vlsseg6_nxv1i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, i16*, i64, , i64) + +define @test_vlsseg7_nxv1i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %1 = 
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64*, i64, i64)
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i64> @test_vlsseg2_nxv4i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT: vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
+ ret <vscale x 4 x i64> %1
+}
+
+define <vscale x 4 x i64> @test_vlsseg2_mask_nxv4i64(i64* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT: vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a2, a2, e64,m4,tu,mu
+; CHECK-NEXT: vlsseg2e64.v v12, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 0
+ %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> %1, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %2, 1
+ ret <vscale x 4 x i64> %3
+}
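+
+; Reviewer annotation (added for readability, same caveat as above): for
+; LMUL=4 element types (nxv4i64 here, nxv8i32 and nxv32i8 further down) the
+; two-field tuple is allocated to the register pair $v12m4_v16m4, so the
+; maskedoff copy uses vmv4r.v; the LMUL=2 nxv2i64 tests at the end use
+; $v14m2_v16m2 and vmv2r.v instead of the vmv1r.v seen in the LMUL<=1 tests.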
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT: vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT: vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT: vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT: vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT: vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT: vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT: vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32*, i64, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT: vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+ ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(i32* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT: vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a2, a2, e32,m4,tu,mu
+; CHECK-NEXT: vlsseg2e32.v v12, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+ %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32> %1, <vscale x 8 x i32> %1, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+ ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8*, i64, i64)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i64, <vscale x 32 x i1>, i64)
+
+define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
+ ret <vscale x 32 x i8> %1
+}
+
+define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(i8* %base, i64 %offset, i64 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e8,m4,ta,mu
+; CHECK-NEXT: vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a2, a2, e8,m4,tu,mu
+; CHECK-NEXT: vlsseg2e8.v v12, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+ %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8> %1, <vscale x 32 x i8> %1, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+ ret <vscale x 32 x i8> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1, <vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16> %1, <vscale x 2 x i16> %1, <vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16> %1, <vscale x 2 x i16> %1, <vscale x 2 x i16> %1, <vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
@test_vlsseg5_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf2,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf2,tu,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, i16*, i64, , i64) + +define @test_vlsseg6_nxv2i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf2,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf2,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf2,tu,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, i16*, i64, , i64) + +define @test_vlsseg7_nxv2i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf2,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf2,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf2,tu,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, i16*, i64, , i64) + +define @test_vlsseg8_nxv2i16(i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf2,ta,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf2,ta,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf2,tu,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv2i64(i64*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i64(,, i64*, i64, , i64) + +define @test_vlsseg2_nxv2i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg2e64.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg2e64.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu +; CHECK-NEXT: vlsseg2e64.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv2i64(i64*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i64(,,, i64*, i64, , i64) + +define @test_vlsseg3_nxv2i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed 
$v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64(,,,, i64*, i64, , i64) + +define @test_vlsseg4_nxv2i64(i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg4e64.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg4e64.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu +; CHECK-NEXT: vlsseg4e64.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv16f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, half*, i64, , i64) + +define @test_vlsseg2_nxv16f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m4,ta,mu +; CHECK-NEXT: vlsseg2e16.v v12, (a0), a1 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv16f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m4,ta,mu +; CHECK-NEXT: vlsseg2e16.v v12, (a0), a1 +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a2, a2, e16,m4,tu,mu +; CHECK-NEXT: vlsseg2e16.v v12, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, 
%1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv4f64(double*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, double*, i64, , i64) + +define @test_vlsseg2_nxv4f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu +; CHECK-NEXT: vlsseg2e64.v v12, (a0), a1 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv4f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu +; CHECK-NEXT: vlsseg2e64.v v12, (a0), a1 +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a2, a2, e64,m4,tu,mu +; CHECK-NEXT: vlsseg2e64.v v12, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1f64(double*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, double*, i64, , i64) + +define @test_vlsseg2_nxv1f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg2e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg2e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg2e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1f64(double*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, double*, i64, , i64) + +define @test_vlsseg3_nxv1f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg3e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg3e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, 
v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg3e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(double*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, double*, i64, , i64) + +define @test_vlsseg4_nxv1f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg4e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg4e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg4e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i64, , i64) + +define @test_vlsseg5_nxv1f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg5e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i64, , i64) + +define @test_vlsseg6_nxv1f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f64: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg6e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i64, , i64) + +define @test_vlsseg7_nxv1f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg7e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i64, , i64) + +define @test_vlsseg8_nxv1f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu +; CHECK-NEXT: vlsseg8e64.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv2f32(float*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i64, , i64) + +define @test_vlsseg2_nxv2f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv2f32(float*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i64, , i64) + +define @test_vlsseg3_nxv2f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(float*, i64, i64) +declare {,,,} 
@llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i64, , i64) + +define @test_vlsseg4_nxv2f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i64, , i64) + +define @test_vlsseg5_nxv2f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i64, , i64) + +define @test_vlsseg6_nxv2f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; 
CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i64, , i64) + +define @test_vlsseg7_nxv2f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i64, , i64) + +define @test_vlsseg8_nxv2f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,m1,tu,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} 
@llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i64, , i64) + +define @test_vlsseg2_nxv1f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg2e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1f16(half*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i64, , i64) + +define @test_vlsseg3_nxv1f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg3e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(half*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i64, , i64) + +define @test_vlsseg4_nxv1f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlsseg4_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg4e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i64, , i64) + +define @test_vlsseg5_nxv1f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg5e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i64, , i64) + +define @test_vlsseg6_nxv1f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg6e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* 
%base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i64, , i64) + +define @test_vlsseg7_nxv1f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg7e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i64, , i64) + +define @test_vlsseg8_nxv1f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,mf4,ta,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,tu,mu +; CHECK-NEXT: vlsseg8e16.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv1f32(float*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i64, , i64) + +define @test_vlsseg2_nxv1f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v 
v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg2e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv1f32(float*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i64, , i64) + +define @test_vlsseg3_nxv1f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg3e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(float*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i64, , i64) + +define @test_vlsseg4_nxv1f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg4e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} 
@llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i64, , i64) + +define @test_vlsseg5_nxv1f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg5_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg5e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i64, , i64) + +define @test_vlsseg6_nxv1f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg6_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg6e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i64, , i64) + +define @test_vlsseg7_nxv1f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg7_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg7e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i64, , i64) + +define @test_vlsseg8_nxv1f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlsseg8_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,mf2,ta,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1 +; CHECK-NEXT: vmv1r.v v16, v15 +; CHECK-NEXT: vmv1r.v v17, v15 +; CHECK-NEXT: vmv1r.v v18, v15 +; CHECK-NEXT: vmv1r.v v19, v15 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,tu,mu +; CHECK-NEXT: vlsseg8e32.v v15, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv8f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, half*, i64, , i64) + +define @test_vlsseg2_nxv8f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg2e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; 
CHECK-NEXT: vlsseg2e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv8f16(half*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, half*, i64, , i64) + +define @test_vlsseg3_nxv8f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg3e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(half*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, half*, i64, , i64) + +define @test_vlsseg4_nxv8f16(half* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlsseg4_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,tu,mu +; CHECK-NEXT: vlsseg4e16.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv8f32(float*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, float*, i64, , i64) + +define @test_vlsseg2_nxv8f32(float* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e32,m4,ta,mu +; CHECK-NEXT: vlsseg2e32.v v12, (a0), a1 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed 
$v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv8f32(float* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e32,m4,ta,mu +; CHECK-NEXT: vlsseg2e32.v v12, (a0), a1 +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vsetvli a2, a2, e32,m4,tu,mu +; CHECK-NEXT: vlsseg2e32.v v12, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlsseg2.nxv2f64(double*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, double*, i64, , i64) + +define @test_vlsseg2_nxv2f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg2e64.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlsseg2_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg2e64.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu +; CHECK-NEXT: vlsseg2e64.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlsseg3.nxv2f64(double*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i64, , i64) + +define @test_vlsseg3_nxv2f64(double* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlsseg3_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1 +; CHECK-NEXT: vmv2r.v v16, v14 +; CHECK-NEXT: vmv2r.v v18, v14 +; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu +; CHECK-NEXT: vlsseg3e64.v v14, (a0), a1, v0.t +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + 
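+; A rough sketch of the semantics these tests exercise, where elt is the
+; element type, nf the number of fields, and %offset a byte stride between
+; segments (the field/base names below are illustrative only, not IR):
+;   for (i = 0; i < vl; ++i)
+;     for (f = 0; f < nf; ++f)
+;       field[f][i] = *(elt *)((char *)base + i * offset + f * sizeof(elt));
+; The .mask intrinsics additionally take an nf-tuple of maskedoff operands and
+; a mask vector; segments whose mask bit is clear keep the maskedoff values.
+; That is why the expected code above first seeds the destination tuple with
+; whole-register moves and then replays the strided segment load as a masked,
+; tail-undisturbed (tu,mu) vlsseg.
+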
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double*, i64, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlsseg4_nxv2f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg2_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg3_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg4_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg5_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg6_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg7_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg8_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg2_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg3_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg4_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg5_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg6_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg7_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg8_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float*, i64, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlsseg2_nxv4f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float*, i64, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlsseg3_nxv4f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float*, i64, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlsseg4_nxv4f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}